diff --git a/.gitignore b/.gitignore index b8bc7216f..8cc03b0f2 100644 --- a/.gitignore +++ b/.gitignore @@ -8,6 +8,8 @@ _research web/node_modules web/dist web/coverage +web/.yalc +web/yalc.lock yarn-error* release .idea @@ -18,6 +20,12 @@ web/cypress/node_modules *.rsa *.rsa.pub node_modules/ +dist +.yalc +yalc.lock +node_modules # ignore any executables we build /gitness +/registry/logs/* +/distribution-spec diff --git a/.golangci.yml b/.golangci.yml index c50920bbc..1bb0b1ee7 100644 --- a/.golangci.yml +++ b/.golangci.yml @@ -287,6 +287,8 @@ issues: linters: [ govet ] - source: "^//\\s*go:generate\\s" linters: [ lll ] + - text: 'local replacement are not allowed: github.com/harness/gitness' + linters: [ gomoddirectives ] - text: 'replacement are not allowed: github.com/docker/docker' linters: [ gomoddirectives ] - source: "(noinspection|TODO)" @@ -297,6 +299,192 @@ issues: linters: [ errorlint ] - path: "^cli/" linters: [forbidigo] + #Registry Specific + - path: "^registry/app/manifest/.*" + linters: [ tagliatelle, staticcheck, revive ] + - path: "^registry/app/dist_temp/.*" + linters: [ errorlint ] + - path: "^registry/app/driver/filesystem/.*" + linters: [ gocritic ] + - path: "^registry/app/driver/s3-aws/.*" + linters: [ gocognit, gocyclo, gosec, nestif, cyclop] + - path: "^registry/app/remote/clients/registry/interceptor/interceptor.go" + linters: [ goheader ] + - path: "^registry/app/common/http/modifier/modifier.go" + linters: [ goheader ] + - path: "^registry/app/driver/fileinfo.go" + linters: [ goheader ] + - path: "^registry/app/driver/storagedriver.go" + linters: [ goheader ] + - path: "^registry/app/driver/walk.go" + linters: [ goheader ] + - path: "^registry/app/dist_temp/challenge/addr.go" + linters: [ goheader ] + - path: "^registry/app/dist_temp/challenge/authchallenge.go" + linters: [ goheader ] + - path: "^registry/app/dist_temp/challenge/authchallenge_test.go" + linters: [ goheader ] + - path: "^registry/app/dist_temp/requestutil/util.go" + linters: [ goheader ] + - path: "^registry/app/dist_temp/requestutil/util_test.go" + linters: [ goheader ] + - path: "^registry/app/manifest/descriptor.go" + linters: [ goheader ] + - path: "^registry/app/manifest/doc.go" + linters: [ goheader ] + - path: "^registry/app/manifest/errors.go" + linters: [ goheader ] + - path: "^registry/app/manifest/manifests.go" + linters: [ goheader ] + - path: "^registry/app/manifest/versioned.go" + linters: [ goheader ] + - path: "^registry/app/common/lib/authorizer.go" + linters: [ goheader ] + - path: "^registry/app/common/lib/link.go" + linters: [ goheader ] + - path: "^registry/app/common/http/tls.go" + linters: [ goheader ] + - path: "^registry/app/common/http/transport.go" + linters: [ goheader ] + - path: "^registry/app/common/http/transport_test.go" + linters: [ goheader ] + - path: "^registry/app/manifest/schema2/manifest.go" + linters: [ goheader ] + - path: "^registry/app/manifest/schema2/manifest_test.go" + linters: [ goheader ] + - path: "^registry/app/manifest/ocischema/index.go" + linters: [ goheader ] + - path: "^registry/app/manifest/ocischema/manifest.go" + linters: [ goheader ] + - path: "^registry/app/remote/clients/registry/auth/null/authorizer.go" + linters: [ goheader ] + - path: "^registry/app/remote/clients/registry/auth/basic/authorizer.go" + linters: [ goheader ] + - path: "^registry/app/remote/clients/registry/auth/basic/authorizer_test.go" + linters: [ goheader ] + - path: "^registry/app/common/lib/errors/const.go" + linters: [ goheader ] + - path: 
"^registry/app/common/lib/errors/errors.go" + linters: [ goheader ] + - path: "^registry/app/common/lib/errors/stack.go" + linters: [ goheader ] + - path: "^registry/app/common/lib/errors/stack_test.go" + linters: [ goheader ] + - path: "^registry/app/remote/clients/registry/auth/bearer/authorizer.go" + linters: [ goheader ] + - path: "^registry/app/remote/clients/registry/auth/bearer/cache.go" + linters: [ goheader ] + - path: "^registry/app/remote/clients/registry/auth/bearer/scope.go" + linters: [ goheader ] + - path: "^registry/app/manifest/manifestlist/manifestlist.go" + linters: [ goheader ] + - path: "^registry/app/manifest/manifestlist/manifestlist_test.go" + linters: [ goheader ] + - path: "^registry/app/driver/factory/factory.go" + linters: [ goheader ] + - path: "^registry/app/dist_temp/dcontext/context.go" + linters: [ goheader ] + - path: "^registry/app/dist_temp/dcontext/doc.go" + linters: [ goheader ] + - path: "^registry/app/dist_temp/dcontext/http.go" + linters: [ goheader ] + - path: "^registry/app/dist_temp/dcontext/logger.go" + linters: [ goheader ] + - path: "^registry/app/dist_temp/dcontext/trace.go" + linters: [ goheader ] + - path: "^registry/app/dist_temp/dcontext/util.go" + linters: [ goheader ] + - path: "^registry/app/dist_temp/dcontext/version.go" + linters: [ goheader ] + - path: "^registry/app/dist_temp/dcontext/http_test.go" + linters: [ goheader ] + - path: "^registry/app/dist_temp/dcontext/trace_test.go" + linters: [ goheader ] + - path: "^registry/app/dist_temp/dcontext/version_test.go" + linters: [ goheader ] + - path: "^registry/app/driver/base/base.go" + linters: [ goheader ] + - path: "^registry/app/driver/base/regulator.go" + linters: [ goheader ] + - path: "^registry/app/driver/base/regulator_test.go" + linters: [ goheader ] + - path: "^registry/app/storage/blobs.go" + linters: [ goheader ] + - path: "^registry/app/storage/blobwriter.go" + linters: [ goheader ] + - path: "^registry/app/storage/blobwriter_resumable.go" + linters: [ goheader ] + - path: "^registry/app/storage/errors.go" + linters: [ goheader ] + - path: "^registry/app/storage/filereader.go" + linters: [ goheader ] + - path: "^registry/app/storage/gcstoragelient.go" + linters: [ goheader ] + - path: "^registry/app/storage/io.go" + linters: [ goheader ] + - path: "^registry/app/storage/middleware.go" + linters: [ goheader ] + - path: "^registry/app/storage/ociblobstore.go" + linters: [ goheader ] + - path: "^registry/app/storage/paths.go" + linters: [ goheader ] + - path: "^registry/app/storage/storageservice.go" + linters: [ goheader ] + - path: "^registry/app/remote/clients/registry/client.go" + linters: [ goheader ] + - path: "^registry/app/remote/adapter/adapter.go" + linters: [ goheader ] + - path: "^registry/app/remote/clients/registry/auth/authorizer.go" + linters: [ goheader ] + - path: "^registry/app/driver/s3-aws/s3.go" + linters: [ goheader ] + - path: "^registry/app/driver/s3-aws/s3_v2_signer.go" + linters: [ goheader ] + - path: "^registry/app/driver/filesystem/driver.go" + linters: [ goheader ] + - path: "^registry/app/pkg/docker/app.go" + linters: [ goheader ] + - path: "^registry/app/pkg/docker/catalog.go" + linters: [ goheader ] + - path: "^registry/app/pkg/docker/compat.go" + linters: [ goheader ] + - path: "^registry/app/pkg/docker/context.go" + linters: [ goheader ] + - path: "^registry/app/pkg/docker/controller.go" + linters: [ goheader ] + - path: "^registry/app/pkg/docker/local.go" + linters: [ goheader ] + - path: "^registry/app/pkg/docker/manifest_service.go" + 
linters: [ goheader ] + - path: "^registry/app/pkg/docker/remote.go" + linters: [ goheader ] + - path: "^registry/app/remote/adapter/dockerhub/adapter.go" + linters: [ goheader ] + - path: "^registry/app/remote/adapter/dockerhub/client.go" + linters: [ goheader ] + - path: "^registry/app/remote/adapter/dockerhub/consts.go" + linters: [ goheader ] + - path: "^registry/app/driver/testsuites/testsuites.go" + linters: [ goheader ] + - path: "^registry/app/dist_temp/errcode/errors.go" + linters: [ goheader ] + - path: "^registry/app/dist_temp/errcode/handler.go" + linters: [ goheader ] + - path: "^registry/app/dist_temp/errcode/register.go" + linters: [ goheader ] + - path: "^registry/app/remote/controller/proxy/controller.go" + linters: [ goheader ] + - path: "^registry/app/remote/controller/proxy/inflight.go" + linters: [ goheader ] + - path: "^registry/app/remote/controller/proxy/local.go" + linters: [ goheader ] + - path: "^registry/app/remote/controller/proxy/remote.go" + linters: [ goheader ] + - path: "^registry/app/remote/controller/proxy/inflight_test.go" + linters: [ goheader ] + - path: "^registry/app/remote/adapter/native/adapter.go" + linters: [ goheader ] + #Registry Specific ends - text: "mnd: Magic number: \\d" linters: - gomnd diff --git a/.local.env b/.local.env index 20d2b78ff..9d1cee2fe 100644 --- a/.local.env +++ b/.local.env @@ -10,3 +10,8 @@ GITNESS_DEBUG=true GITNESS_DOCKER_API_VERSION=1.41 GITNESS_SSH_ENABLE=true GITNESS_SSH_HOST=localhost +GITNESS_SSH_PORT=2222 + +GITNESS_REGISTRY_STORAGE_TYPE=filesystem +GITNESS_REGISTRY_ENABLED=false +GITNESS_REGISTRY_FILESYSTEM_ROOT_DIRECTORY=/tmp diff --git a/Makefile b/Makefile index 6811519ad..11ff327df 100644 --- a/Makefile +++ b/Makefile @@ -34,7 +34,7 @@ tools: $(tools) ## Install tools required for the build ############################################################################### # -# Build and testing rules +# Gitness Build and testing rules # ############################################################################### @@ -47,6 +47,43 @@ test: generate ## Run the go tests go test -v -coverprofile=coverage.out ./... go tool cover -html=coverage.out + + +############################################################################### +# +# Artifact Registry Build and testing rules +# +############################################################################### + +run: clean build + ./gitness server .local.env || true + +ar-conformance-test: clean build + ./gitness server .local.env > logfile.log 2>&1 & echo $$! > server.PID + @sleep 10 + ./registry/tests/conformance_test.sh localhost:3000 || true + kill `cat server.PID` + @rm server.PID + @rm logfile.log + +ar-hot-conformance-test: + rm -rf distribution-spec || true + ./registry/tests/conformance_test.sh localhost:3000 || true + +ar-api-update: + @set -e; \ + oapi-codegen --config ./registry/config/openapi/artifact-services.yaml ./registry/app/api/openapi/api.yaml; \ + oapi-codegen --config ./registry/config/openapi/artifact-types.yaml ./registry/app/api/openapi/api.yaml; + +ar-clean: + @rm artifact-registry 2> /dev/null || true + @docker stop ps_artifacthub 2> /dev/null || true + rm -rf distribution-spec + @kill -9 $$(lsof -t -i:3000) || true + @rm server.PID || true + @rm logfile.log || true + go clean + ############################################################################### # # Code Formatting and linting diff --git a/NOTICE b/NOTICE new file mode 100644 index 000000000..a9f2b1716 --- /dev/null +++ b/NOTICE @@ -0,0 +1,12 @@ +Copyright 2024 Harness, Inc. 
+ +This product includes software developed at + +https://github.com/goharbor/harbor +Licensed under the Apache License, Version 2.0 + +https://github.com/distribution/distribution +Licensed under the Apache License, Version 2.0 + +https://gitlab.com/gitlab-org/container-registry +Licensed under the Apache License, Version 2.0 diff --git a/README.md b/README.md index 26289a051..1cba18608 100644 --- a/README.md +++ b/README.md @@ -96,6 +96,14 @@ To regenerate the code, please execute the following steps: The latest API changes should now be reflected in `web/src/services/code/index.tsx` +## Run Registry Conformance Tests +``` +make ar-conformance-test +``` +To run the conformance tests against an already running service, use: +``` +make ar-hot-conformance-test +``` ## User Interface @@ -104,6 +112,7 @@ This project includes a full user interface for interacting with the system. Whe ## REST API This project includes a swagger specification. When you run the application, you can access the swagger specification by navigating to `http://localhost:3000/swagger` in your browser (for raw yaml see `http://localhost:3000/openapi.yaml`). +For registry endpoints, swagger is currently served at a separate endpoint, `http://localhost:3000/registry/swagger/` (for raw json see `http://localhost:3000/registry/swagger.json`); it will later be merged into the main swagger endpoint. For testing, it's simplest to just use the cli to create a token (this requires gitness server to run): diff --git a/app/api/auth/auth.go b/app/api/auth/auth.go index 4e628eac8..c9f0b5554 100644 --- a/app/api/auth/auth.go +++ b/app/api/auth/auth.go @@ -38,7 +38,8 @@ var ( // Check checks if a resource specific permission is granted for the current auth session in the scope. // Returns nil if the permission is granted, otherwise returns an error. // NotAuthenticated, NotAuthorized, or any underlying error. -func Check(ctx context.Context, authorizer authz.Authorizer, session *auth.Session, +func Check( + ctx context.Context, authorizer authz.Authorizer, session *auth.Session, scope *types.Scope, resource *types.Resource, permission enum.Permission, ) error { authorized, err := authorizer.Check( @@ -46,7 +47,31 @@ func Check(ctx context.Context, authorizer authz.Authorizer, session *auth.Sessi session, scope, resource, - permission) + permission, + ) if err != nil { return err } + + if !authorized { + return ErrNotAuthorized + } + + return nil +} + +// CheckAll checks if multiple resource specific permissions are granted for the current auth session in the scope. +// Returns nil if all permissions are granted, otherwise returns an error. +// NotAuthenticated, NotAuthorized, or any underlying error. +func CheckAll( + ctx context.Context, authorizer authz.Authorizer, session *auth.Session, + permissionChecks ...types.PermissionCheck, +) error { + authorized, err := authorizer.CheckAll( + ctx, + session, + permissionChecks..., + ) if err != nil { return err } @@ -62,9 +87,11 @@ func Check(ctx context.Context, authorizer authz.Authorizer, session *auth.Sessi // in the scope of a parent. // Returns nil if the permission is granted, otherwise returns an error. // NotAuthenticated, NotAuthorized, or any underlying error.
-func CheckChild(ctx context.Context, authorizer authz.Authorizer, session *auth.Session, +func CheckChild( + ctx context.Context, authorizer authz.Authorizer, session *auth.Session, spaceStore store.SpaceStore, repoStore store.RepoStore, parentType enum.ParentResourceType, parentID int64, - resourceType enum.ResourceType, resourceName string, permission enum.Permission) error { + resourceType enum.ResourceType, resourceName string, permission enum.Permission, +) error { scope, err := getScopeForParent(ctx, spaceStore, repoStore, parentType, parentID) if err != nil { return err @@ -79,8 +106,10 @@ func CheckChild(ctx context.Context, authorizer authz.Authorizer, session *auth. } // getScopeForParent Returns the scope for a given resource parent (space or repo). -func getScopeForParent(ctx context.Context, spaceStore store.SpaceStore, repoStore store.RepoStore, - parentType enum.ParentResourceType, parentID int64) (*types.Scope, error) { +func getScopeForParent( + ctx context.Context, spaceStore store.SpaceStore, repoStore store.RepoStore, + parentType enum.ParentResourceType, parentID int64, +) (*types.Scope, error) { // TODO: Can this be done cleaner? switch parentType { case enum.ParentResourceTypeSpace: diff --git a/app/api/auth/registry.go b/app/api/auth/registry.go new file mode 100644 index 000000000..d8f567cbd --- /dev/null +++ b/app/api/auth/registry.go @@ -0,0 +1,36 @@ +// Copyright 2023 Harness, Inc. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package auth + +import ( + "context" + + "github.com/harness/gitness/app/auth" + "github.com/harness/gitness/app/auth/authz" + "github.com/harness/gitness/types" +) + +// CheckRegistry checks if a registry specific permission is granted for the current auth session +// in the scope of its parent. +// Returns nil if the permission is granted, otherwise returns an error. +// NotAuthenticated, NotAuthorized, or any underlying error. +func CheckRegistry( + ctx context.Context, + authorizer authz.Authorizer, + session *auth.Session, + permissionChecks ...types.PermissionCheck, +) error { + return CheckAll(ctx, authorizer, session, permissionChecks...) +} diff --git a/app/api/controller/secret/create.go b/app/api/controller/secret/create.go index 548351110..ba00cb2f4 100644 --- a/app/api/controller/secret/create.go +++ b/app/api/controller/secret/create.go @@ -126,7 +126,7 @@ func enc(encrypt encrypt.Encrypter, secret *types.Secret) (*types.Secret, error) } // helper function returns the same secret with decrypted data. 
-func dec(encrypt encrypt.Encrypter, secret *types.Secret) (*types.Secret, error) { +func Dec(encrypt encrypt.Encrypter, secret *types.Secret) (*types.Secret, error) { if secret == nil { return nil, fmt.Errorf("cannot decrypt a nil secret") } diff --git a/app/api/controller/secret/find.go b/app/api/controller/secret/find.go index 7923e32a0..b70c9d667 100644 --- a/app/api/controller/secret/find.go +++ b/app/api/controller/secret/find.go @@ -42,7 +42,7 @@ func (c *Controller) Find( if err != nil { return nil, fmt.Errorf("failed to find secret: %w", err) } - secret, err = dec(c.encrypter, secret) + secret, err = Dec(c.encrypter, secret) if err != nil { return nil, fmt.Errorf("could not decrypt secret: %w", err) } diff --git a/app/api/controller/user/login.go b/app/api/controller/user/login.go index 061baff0e..846513809 100644 --- a/app/api/controller/user/login.go +++ b/app/api/controller/user/login.go @@ -69,7 +69,7 @@ func (c *Controller) Login( return nil, usererror.ErrNotFound } - tokenIdentifier, err := generateSessionTokenIdentifier() + tokenIdentifier, err := GenerateSessionTokenIdentifier() if err != nil { return nil, err } @@ -81,7 +81,7 @@ func (c *Controller) Login( return &types.TokenResponse{Token: *token, AccessToken: jwtToken}, nil } -func generateSessionTokenIdentifier() (string, error) { +func GenerateSessionTokenIdentifier() (string, error) { r, err := rand.Int(rand.Reader, big.NewInt(10000)) if err != nil { return "", fmt.Errorf("failed to generate random number: %w", err) diff --git a/app/api/handler/system/list_config.go b/app/api/handler/system/list_config.go index c8b1ddbf8..c29ef3176 100644 --- a/app/api/handler/system/list_config.go +++ b/app/api/handler/system/list_config.go @@ -27,6 +27,7 @@ type ConfigOutput struct { PublicResourceCreationEnabled bool `json:"public_resource_creation_enabled"` SSHEnabled bool `json:"ssh_enabled"` GitspaceEnabled bool `json:"gitspace_enabled"` + ArtifactRegistryEnabled bool `json:"artifact_registry_enabled"` } // HandleGetConfig returns an http.HandlerFunc that processes an http.Request @@ -46,6 +47,7 @@ func HandleGetConfig(config *types.Config, sysCtrl *system.Controller) http.Hand UserSignupAllowed: userSignupAllowed, PublicResourceCreationEnabled: config.PublicResourceCreationEnabled, GitspaceEnabled: config.Gitspace.Enable, + ArtifactRegistryEnabled: config.Registry.Enable, }) } } diff --git a/app/api/middleware/authz/authz.go b/app/api/middleware/authz/authz.go index 1ab072aa7..f48c9da4a 100644 --- a/app/api/middleware/authz/authz.go +++ b/app/api/middleware/authz/authz.go @@ -28,21 +28,23 @@ import ( // BlockSessionToken blocks any request that uses a session token for authentication. // NOTE: Major use case as of now is blocking usage of session tokens with git. func BlockSessionToken(next http.Handler) http.Handler { - return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { - ctx := r.Context() + return http.HandlerFunc( + func(w http.ResponseWriter, r *http.Request) { + ctx := r.Context() - // only block if auth data was available and it's based on a session token. - if session, oks := request.AuthSessionFrom(ctx); oks { - if tokenMetadata, okt := session.Metadata.(*auth.TokenMetadata); okt && - tokenMetadata.TokenType == enum.TokenTypeSession { - log.Ctx(ctx).Warn().Msg("blocking git operation - session tokens are not allowed for usage with git") + // only block if auth data was available and it's based on a session token. 
+ if session, oks := request.AuthSessionFrom(ctx); oks { + if tokenMetadata, ok := session.Metadata.(*auth.TokenMetadata); ok && + tokenMetadata.TokenType == enum.TokenTypeSession { + log.Ctx(ctx).Warn().Msg("blocking git operation - session tokens are not allowed for usage with git") - // NOTE: Git doesn't print the error message, so just return default 401 Unauthorized. - render.Unauthorized(ctx, w) - return + // NOTE: Git doesn't print the error message, so just return default 401 Unauthorized. + render.Unauthorized(ctx, w) + return + } } - } - next.ServeHTTP(w, r) - }) + next.ServeHTTP(w, r) + }, + ) } diff --git a/app/auth/authn/jwt.go b/app/auth/authn/jwt.go index 902abb01e..fdb04e01c 100644 --- a/app/auth/authn/jwt.go +++ b/app/auth/authn/jwt.go @@ -62,13 +62,15 @@ func (a *JWTAuthenticator) Authenticate(r *http.Request) (*auth.Session, error) var principal *types.Principal var err error claims := &jwt.Claims{} - parsed, err := gojwt.ParseWithClaims(str, claims, func(_ *gojwt.Token) (interface{}, error) { - principal, err = a.principalStore.Find(ctx, claims.PrincipalID) - if err != nil { - return nil, fmt.Errorf("failed to get principal for token: %w", err) - } - return []byte(principal.Salt), nil - }) + parsed, err := gojwt.ParseWithClaims( + str, claims, func(_ *gojwt.Token) (interface{}, error) { + principal, err = a.principalStore.Find(ctx, claims.PrincipalID) + if err != nil { + return nil, fmt.Errorf("failed to get principal for token: %w", err) + } + return []byte(principal.Salt), nil + }, + ) if err != nil { return nil, fmt.Errorf("parsing of JWT claims failed: %w", err) } @@ -90,6 +92,8 @@ func (a *JWTAuthenticator) Authenticate(r *http.Request) (*auth.Session, error) } case claims.Membership != nil: metadata = a.metadataFromMembershipClaims(claims.Membership) + case claims.AccessPermissions != nil: + metadata = a.metadataFromAccessPermissions(claims.AccessPermissions) default: return nil, fmt.Errorf("jwt is missing sub-claims") } @@ -113,8 +117,10 @@ func (a *JWTAuthenticator) metadataFromTokenClaims( // protect against faked JWTs for other principals in case of single salt leak if principal.ID != tkn.PrincipalID { - return nil, fmt.Errorf("JWT was for principal %d while db token was for principal %d", - principal.ID, tkn.PrincipalID) + return nil, fmt.Errorf( + "JWT was for principal %d while db token was for principal %d", + principal.ID, tkn.PrincipalID, + ) } return &auth.TokenMetadata{ @@ -133,6 +139,14 @@ func (a *JWTAuthenticator) metadataFromMembershipClaims( } } +func (a *JWTAuthenticator) metadataFromAccessPermissions( + s *jwt.SubClaimsAccessPermissions, +) auth.Metadata { + return &auth.AccessPermissionMetadata{ + AccessPermissions: s, + } +} + func extractToken(r *http.Request, cookieName string) string { // Check query param first (as that's most immediately visible to caller) if queryToken, ok := request.GetAccessTokenFromQuery(r); ok { diff --git a/app/auth/authz/membership.go b/app/auth/authz/membership.go index 4153f2ca0..93dde8d08 100644 --- a/app/auth/authz/membership.go +++ b/app/auth/authz/membership.go @@ -26,6 +26,7 @@ import ( "github.com/harness/gitness/types/enum" "github.com/rs/zerolog/log" + "golang.org/x/exp/slices" ) var _ Authorizer = (*MembershipAuthorizer)(nil) @@ -110,6 +111,9 @@ func (a *MembershipAuthorizer) Check( case enum.ResourceTypeInfraProvider: spacePath = scope.SpacePath + case enum.ResourceTypeRegistry: + spacePath = scope.SpacePath + case enum.ResourceTypeUser: // a user is allowed to edit themselves if resource.Identifier == 
session.Principal.UID && @@ -138,20 +142,29 @@ return a.checkWithMembershipMetadata(ctx, membershipMetadata, spacePath, permission) } + // accessPermissionMetadata contains the access permissions per space + if accessPermissionMetadata, ok := session.Metadata.(*auth.AccessPermissionMetadata); ok { + return a.checkWithAccessPermissionMetadata(ctx, accessPermissionMetadata, spacePath, permission) + } + // ensure we aren't bypassing unknown metadata with impact on authorization if session.Metadata != nil && session.Metadata.ImpactsAuthorization() { return false, fmt.Errorf("session contains unknown metadata that impacts authorization: %T", session.Metadata) } - return a.permissionCache.Get(ctx, PermissionCacheKey{ - PrincipalID: session.Principal.ID, - SpaceRef: spacePath, - Permission: permission, - }) + return a.permissionCache.Get( + ctx, PermissionCacheKey{ + PrincipalID: session.Principal.ID, + SpaceRef: spacePath, + Permission: permission, + }, + ) } -func (a *MembershipAuthorizer) CheckAll(ctx context.Context, session *auth.Session, - permissionChecks ...types.PermissionCheck) (bool, error) { +func (a *MembershipAuthorizer) CheckAll( + ctx context.Context, session *auth.Session, + permissionChecks ...types.PermissionCheck, +) (bool, error) { for i := range permissionChecks { p := permissionChecks[i] if _, err := a.Check(ctx, session, &p.Scope, &p.Resource, p.Permission); err != nil { @@ -193,3 +206,28 @@ // access is granted by ephemeral membership return true, nil } + +// checkWithAccessPermissionMetadata checks access using the ephemeral access permissions provided in the metadata. +func (a *MembershipAuthorizer) checkWithAccessPermissionMetadata( + ctx context.Context, + accessPermissionMetadata *auth.AccessPermissionMetadata, + requestedSpacePath string, + requestedPermission enum.Permission, +) (bool, error) { + space, err := a.spaceStore.FindByRef(ctx, requestedSpacePath) + if err != nil { + return false, fmt.Errorf("failed to find space by ref: %w", err) + } + + if accessPermissionMetadata.AccessPermissions.Permissions == nil { + return false, fmt.Errorf("no %s permission provided", requestedPermission) + } + + for _, accessPermission := range accessPermissionMetadata.AccessPermissions.Permissions { + if space.ID == accessPermission.SpaceID && slices.Contains(accessPermission.Permissions, requestedPermission) { + return true, nil + } + } + + return false, fmt.Errorf("no %s permission provided", requestedPermission) +} diff --git a/app/auth/metadata.go b/app/auth/metadata.go index 486bc3e28..c20f22502 100644 --- a/app/auth/metadata.go +++ b/app/auth/metadata.go @@ -14,7 +14,10 @@ package auth -import "github.com/harness/gitness/types/enum" +import ( + "github.com/harness/gitness/app/jwt" + "github.com/harness/gitness/types/enum" +) type Metadata interface { ImpactsAuthorization() bool @@ -46,3 +49,12 @@ type MembershipMetadata struct { func (m *MembershipMetadata) ImpactsAuthorization() bool { return true } + +// AccessPermissionMetadata contains information about permissions per space.
+type AccessPermissionMetadata struct { + AccessPermissions *jwt.SubClaimsAccessPermissions +} + +func (m *AccessPermissionMetadata) ImpactsAuthorization() bool { + return true +} diff --git a/app/jwt/jwt.go b/app/jwt/jwt.go index 9cfa3664e..9aaeee722 100644 --- a/app/jwt/jwt.go +++ b/app/jwt/jwt.go @@ -15,27 +15,35 @@ package jwt import ( + "fmt" "time" "github.com/harness/gitness/types" "github.com/harness/gitness/types/enum" "github.com/golang-jwt/jwt" - "github.com/pkg/errors" ) const ( issuer = "Gitness" ) +// Source represents the source of the SubClaimsAccessPermissions. +type Source string + +const ( + OciSource Source = "oci" +) + // Claims defines gitness jwt claims. type Claims struct { jwt.StandardClaims PrincipalID int64 `json:"pid,omitempty"` - Token *SubClaimsToken `json:"tkn,omitempty"` - Membership *SubClaimsMembership `json:"ms,omitempty"` + Token *SubClaimsToken `json:"tkn,omitempty"` + Membership *SubClaimsMembership `json:"ms,omitempty"` + AccessPermissions *SubClaimsAccessPermissions `json:"ap,omitempty"` } // SubClaimsToken contains information about the token the JWT was created for. @@ -50,6 +58,18 @@ type SubClaimsMembership struct { SpaceID int64 `json:"sid,omitempty"` } +// SubClaimsAccessPermissions stores the source and the per-space access permissions granted by the JWT. +type SubClaimsAccessPermissions struct { + Source Source `json:"src,omitempty"` + Permissions []AccessPermissions `json:"permissions,omitempty"` +} + +// AccessPermissions stores the permissions allowed on a single space. +type AccessPermissions struct { + SpaceID int64 `json:"sid,omitempty"` + Permissions []enum.Permission `json:"p"` +} + // GenerateForToken generates a jwt for a given token. func GenerateForToken(token *types.Token, secret string) (string, error) { var expiresAt int64 @@ -73,7 +93,7 @@ func GenerateForToken(token *types.Token, secret string) (string, error) { res, err := jwtToken.SignedString([]byte(secret)) if err != nil { - return "", errors.Wrap(err, "Failed to sign token") + return "", fmt.Errorf("failed to sign token: %w", err) } return res, nil @@ -106,7 +126,37 @@ func GenerateWithMembership( res, err := jwtToken.SignedString([]byte(secret)) if err != nil { - return "", errors.Wrap(err, "Failed to sign token") + return "", fmt.Errorf("failed to sign token: %w", err) + } + + return res, nil +} + +// GenerateForTokenWithAccessPermissions generates a jwt carrying the given access permissions for a principal.
+func GenerateForTokenWithAccessPermissions( + principalID int64, + lifetime *time.Duration, + secret string, accessPermissions *SubClaimsAccessPermissions, +) (string, error) { + issuedAt := time.Now() + if lifetime == nil { + return "", fmt.Errorf("token lifetime is required") + } + expiresAt := issuedAt.Add(*lifetime) + + jwtToken := jwt.NewWithClaims(jwt.SigningMethodHS256, Claims{ + StandardClaims: jwt.StandardClaims{ + Issuer: issuer, + IssuedAt: issuedAt.Unix(), + ExpiresAt: expiresAt.Unix(), + }, + PrincipalID: principalID, + AccessPermissions: accessPermissions, + }) + + res, err := jwtToken.SignedString([]byte(secret)) + if err != nil { + return "", fmt.Errorf("failed to sign token: %w", err) } return res, nil diff --git a/app/router/wire.go b/app/router/wire.go index e40e9aaf6..c5ced1bdf 100644 --- a/app/router/wire.go +++ b/app/router/wire.go @@ -48,6 +48,8 @@ import ( "github.com/harness/gitness/app/auth/authn" "github.com/harness/gitness/app/url" "github.com/harness/gitness/git" + "github.com/harness/gitness/registry/app/api" + "github.com/harness/gitness/registry/app/api/router" "github.com/harness/gitness/types" "github.com/google/wire" @@ -56,6 +58,7 @@ import ( // WireSet provides a wire set for this package. var WireSet = wire.NewSet( ProvideRouter, + api.WireSet, ) func GetGitRoutingHost(ctx context.Context, urlProvider url.Provider) string { @@ -106,8 +109,9 @@ func ProvideRouter( capabilitiesCtrl *capabilities.Controller, urlProvider url.Provider, openapi openapi.Service, + registryRouter router.AppRouter, ) *Router { - routers := make([]Interface, 3) + routers := make([]Interface, 4) gitRoutingHost := GetGitRoutingHost(appCtx, urlProvider) gitHandler := NewGitHandler( @@ -116,16 +120,18 @@ func ProvideRouter( repoCtrl, ) routers[0] = NewGitRouter(gitHandler, gitRoutingHost) + routers[1] = router.NewRegistryRouter(registryRouter) - apiHandler := NewAPIHandler(appCtx, config, + apiHandler := NewAPIHandler( + appCtx, config, authenticator, repoCtrl, repoSettingsCtrl, executionCtrl, logCtrl, spaceCtrl, pipelineCtrl, secretCtrl, triggerCtrl, connectorCtrl, templateCtrl, pluginCtrl, pullreqCtrl, webhookCtrl, githookCtrl, git, saCtrl, userCtrl, principalCtrl, checkCtrl, sysCtrl, blobCtrl, searchCtrl, infraProviderCtrl, migrateCtrl, gitspaceCtrl, aiagentCtrl, capabilitiesCtrl) - routers[1] = NewAPIRouter(apiHandler) + routers[2] = NewAPIRouter(apiHandler) webHandler := NewWebHandler(config, authenticator, openapi) - routers[2] = NewWebRouter(webHandler) + routers[3] = NewWebRouter(webHandler) return NewRouter(routers) } diff --git a/app/store/database/migrate/postgres/0066_create_ar_tables.down.sql b/app/store/database/migrate/postgres/0066_create_ar_tables.down.sql new file mode 100644 index 000000000..aa404e008 --- /dev/null +++ b/app/store/database/migrate/postgres/0066_create_ar_tables.down.sql @@ -0,0 +1,13 @@ +DROP TABLE registries; +DROP TABLE media_types; +DROP TABLE blobs; +DROP TABLE registry_blobs; +DROP TABLE manifests; +DROP TABLE manifest_references; +DROP TABLE layers; +DROP TABLE artifacts; +DROP TABLE artifact_stats; +DROP TABLE tags; +DROP TABLE upstream_proxy_configs; +DROP TABLE cleanup_policies; +DROP TABLE cleanup_policy_prefix_mappings; diff --git a/app/store/database/migrate/postgres/0066_create_ar_tables.up.sql b/app/store/database/migrate/postgres/0066_create_ar_tables.up.sql new file mode 100644 index 000000000..4fee82557 --- /dev/null +++ b/app/store/database/migrate/postgres/0066_create_ar_tables.up.sql @@ -0,0 +1,561 @@ +create table registries 
+( + registry_id SERIAL primary key, + registry_name text not null + constraint registry_name_len_check + check (length(registry_name) <= 255), + registry_root_parent_id INTEGER not null, + registry_parent_id INTEGER not null, + registry_description text, + registry_type text not null, + registry_package_type text not null, + registry_upstream_proxies text, + registry_allowed_pattern text, + registry_blocked_pattern text, + registry_created_at BIGINT not null, + registry_updated_at BIGINT not null, + registry_created_by INTEGER not null, + registry_updated_by INTEGER not null, + registry_labels text, + constraint unique_registries + unique (registry_root_parent_id, registry_name) +); + + +create table media_types +( + mt_id SERIAL primary key, + mt_media_type text not null + constraint unique_media_types_type + unique, + mt_created_at BIGINT NOT NULL DEFAULT (EXTRACT(EPOCH FROM now()) * 1000)::BIGINT +); + +create table blobs +( + blob_id SERIAL primary key, + blob_root_parent_id INTEGER not null, + blob_digest bytea not null, + blob_media_type_id INTEGER not null + constraint fk_blobs_media_type_id_media_types + references media_types(mt_id), + blob_size BIGINT not null, + blob_created_at BIGINT not null, + blob_created_by INTEGER not null, + constraint unique_digest_root_parent_id unique (blob_digest, blob_root_parent_id) +); + +create index index_blobs_on_media_type_id + on blobs (blob_media_type_id); + +create table registry_blobs +( + rblob_id SERIAL primary key, + rblob_registry_id INTEGER not null + constraint fk_registry_blobs_rpstry_id_registries + references registries + on delete cascade, + rblob_blob_id INTEGER not null + constraint fk_registry_blobs_blob_id_blobs + references blobs + on delete cascade, + rblob_image_name text + constraint registry_blobs_image_len_check + check (length(rblob_image_name) <= 255), + rblob_created_at BIGINT not null, + rblob_updated_at BIGINT not null, + rblob_created_by INTEGER not null, + rblob_updated_by INTEGER not null, + + constraint unique_registry_blobs_registry_id_blob_id_image + unique (rblob_registry_id, rblob_blob_id, rblob_image_name) +); + +create index index_registry_blobs_on_reg_id + on registry_blobs (rblob_registry_id); + +create index index_registry_blobs_on_reg_blob_id + on registry_blobs (rblob_registry_id, rblob_blob_id); + +create table manifests +( + manifest_id SERIAL primary key, + manifest_registry_id INTEGER not null + constraint fk_manifests_registry_id_registries + references registries(registry_id) + on delete cascade, + manifest_schema_version smallint not null, + manifest_media_type_id INTEGER not null + constraint fk_manifests_media_type_id_media_types + references media_types(mt_id), + manifest_artifact_media_type text, + manifest_total_size BIGINT not null, + manifest_configuration_media_type text, + manifest_configuration_payload bytea, + manifest_configuration_blob_id INTEGER + constraint fk_manifests_configuration_blob_id_blobs + references blobs(blob_id), + manifest_configuration_digest bytea, + manifest_digest bytea not null, + manifest_payload bytea not null, + manifest_non_conformant boolean default false, + manifest_non_distributable_layers boolean default false, + manifest_subject_id INTEGER, + manifest_subject_digest bytea, + manifest_annotations bytea, + manifest_image_name text not null + constraint manifests_img_name_len_check + check (length(manifest_image_name) <= 255), + manifest_created_at BIGINT not null, + manifest_created_by INTEGER not null, + manifest_updated_at BIGINT not null, + 
manifest_updated_by INTEGER not null, + constraint unique_manifests_registry_id_image_name_and_digest + unique (manifest_registry_id, manifest_image_name, manifest_digest), + constraint unique_manifests_registry_id_id_cfg_blob_id + unique (manifest_registry_id, manifest_id, manifest_configuration_blob_id), + constraint fk_manifests_subject_id_manifests + foreign key (manifest_subject_id) references manifests + on delete cascade +); + +create index index_manifests_on_media_type_id + on manifests (manifest_media_type_id); + +create index index_manifests_on_configuration_blob_id + on manifests (manifest_configuration_blob_id); + +create table manifest_references +( + manifest_ref_id SERIAL primary key, + manifest_ref_registry_id INTEGER not null, + manifest_ref_parent_id INTEGER not null, + manifest_ref_child_id INTEGER not null, + manifest_ref_created_at BIGINT not null, + manifest_ref_updated_at BIGINT not null, + manifest_ref_created_by INTEGER not null, + manifest_ref_updated_by INTEGER not null, + constraint unique_manifest_references_prt_id_chd_id + unique (manifest_ref_registry_id, manifest_ref_parent_id, manifest_ref_child_id), + constraint fk_manifest_references_parent_id_mnfsts + foreign key (manifest_ref_parent_id) references manifests + on delete cascade, + constraint fk_manifest_references_child_id_mnfsts + foreign key (manifest_ref_child_id) references manifests, + constraint check_manifest_references_parent_id_and_child_id_differ + check (manifest_ref_parent_id <> manifest_ref_child_id) +); + +create index index_manifest_references_on_rpstry_id_child_id + on manifest_references (manifest_ref_registry_id, manifest_ref_child_id); + +create table layers +( + layer_id SERIAL primary key, + layer_registry_id INTEGER not null, + layer_manifest_id INTEGER not null, + layer_media_type_id INTEGER not null + constraint fk_layer_media_type_id_media_types + references media_types, + layer_blob_id INTEGER not null + constraint fk_layer_blob_id_blobs + references blobs, + layer_size BIGINT not null, + layer_created_at BIGINT not null, + layer_updated_at BIGINT not null, + layer_created_by INTEGER not null, + layer_updated_by INTEGER not null, + constraint unique_layer_rpstry_id_and_mnfst_id_and_blob_id + unique (layer_registry_id, layer_manifest_id, layer_blob_id), + constraint unique_layer_rpstry_id_and_id_and_blob_id + unique (layer_registry_id, layer_id, layer_blob_id), + constraint fk_manifst_id_manifests + foreign key (layer_manifest_id) references manifests(manifest_id) + on delete cascade +); + +create index index_layer_on_media_type_id + on layers (layer_media_type_id); + +create index index_layer_on_blob_id + on layers (layer_blob_id); + +create table artifacts +( + artifact_id SERIAL primary key, + artifact_name text not null, + artifact_registry_id INTEGER not null + constraint fk_registries_registry_id + references registries(registry_id) + on delete cascade, + artifact_labels text, + artifact_enabled boolean default false, + artifact_created_at BIGINT, + artifact_updated_at BIGINT, + artifact_created_by INTEGER, + artifact_updated_by INTEGER, + constraint unique_artifact_registry_id_and_name unique (artifact_registry_id, artifact_name), + constraint check_artifact_name_length check ((char_length(artifact_name) <= 255)) +); + +create index index_artifact_on_registry_id ON artifacts USING btree (artifact_registry_id); + + +create table artifact_stats +( + artifact_stat_id SERIAL primary key, + artifact_stat_artifact_id INTEGER not null + constraint fk_artifacts_artifact_id + 
references artifacts(artifact_id), + artifact_stat_date BIGINT, + artifact_stat_download_count BIGINT, + artifact_stat_upload_bytes BIGINT, + artifact_stat_download_bytes BIGINT, + artifact_stat_created_at BIGINT not null, + artifact_stat_updated_at BIGINT not null, + artifact_stat_created_by INTEGER not null, + artifact_stat_updated_by INTEGER not null, + constraint unique_artifact_stats_artifact_id_and_date unique (artifact_stat_artifact_id, artifact_stat_date) +); + +create table tags +( + tag_id SERIAL primary key, + tag_name text not null + constraint tag_name_len_check + check (char_length(tag_name) <= 128), + tag_image_name text not null + constraint tag_img_name_len_check + check (length(tag_image_name) <= 255), + tag_registry_id INTEGER not null, + tag_manifest_id INTEGER not null, + tag_created_at BIGINT, + tag_updated_at BIGINT, + tag_created_by INTEGER, + tag_updated_by INTEGER, + constraint fk_tag_manifest_id_manifests FOREIGN KEY +(tag_manifest_id) REFERENCES manifests (manifest_id) ON DELETE CASCADE, + constraint unique_tag_registry_id_and_name_and_image_name + unique (tag_registry_id, tag_name, tag_image_name) +); + +create index index_tag_on_rpository_id_and_manifest_id + on tags (tag_registry_id, tag_manifest_id); + +create table upstream_proxy_configs +( + upstream_proxy_config_id SERIAL primary key, + upstream_proxy_config_registry_id INTEGER not null + constraint fk_upstream_proxy_config_registry_id + references registries + on delete cascade, + upstream_proxy_config_source text, + upstream_proxy_config_url text, + upstream_proxy_config_auth_type text not null, + upstream_proxy_config_user_name text, + upstream_proxy_config_secret_identifier text, + upstream_proxy_config_secret_space_id INTEGER, + constraint fk_layers_secret_identifier_and_secret_space_id + foreign key (upstream_proxy_config_secret_identifier, upstream_proxy_config_secret_space_id) + references secrets(secret_uid, secret_space_id) + on delete cascade, + upstream_proxy_config_token text, + upstream_proxy_config_created_at BIGINT, + upstream_proxy_config_updated_at BIGINT, + upstream_proxy_config_created_by INTEGER, + upstream_proxy_config_updated_by INTEGER +); + +create index index_upstream_proxy_config_on_registry_id + on upstream_proxy_configs (upstream_proxy_config_registry_id); + +create table cleanup_policies +( + cp_id SERIAL primary key, + cp_registry_id INTEGER not null + constraint fk_cleanup_policies_registry_id + references registries ON DELETE CASCADE, + cp_name text, + cp_expiry_time_ms BIGINT, + cp_created_at BIGINT not null, + cp_updated_at BIGINT not null, + cp_created_by INTEGER not null, + cp_updated_by INTEGER not null +); + +create index index_cleanup_policies_on_registry_id + on cleanup_policies (cp_registry_id); + +create table cleanup_policy_prefix_mappings +( + cpp_id SERIAL primary key, + cpp_cleanup_policy_id INTEGER not null + constraint fk_cleanup_policies_id + references cleanup_policies(cp_id) ON DELETE CASCADE, + cpp_prefix text not null, + cpp_prefix_type text not null +); + +create index index_cleanup_policy_map_on_policy_id + on cleanup_policy_prefix_mappings (cpp_cleanup_policy_id); + +insert into media_types (mt_media_type) +values ('application/vnd.docker.distribution.manifest.v1+json'), + ('application/vnd.docker.distribution.manifest.v1+prettyjws'), + ('application/vnd.docker.distribution.manifest.v2+json'), + ('application/vnd.docker.distribution.manifest.list.v2+json'), + ('application/vnd.docker.image.rootfs.diff.tar'), + 
('application/vnd.docker.image.rootfs.diff.tar.gzip'), + ('application/vnd.docker.image.rootfs.foreign.diff.tar.gzip'), + ('application/vnd.docker.container.image.v1+json'), + ('application/vnd.docker.container.image.rootfs.diff+x-gtar'), + ('application/vnd.docker.plugin.v1+json'), + ('application/vnd.oci.image.layer.v1.tar'), + ('application/vnd.oci.image.layer.v1.tar+gzip'), + ('application/vnd.oci.image.layer.v1.tar+zstd'), + ('application/vnd.oci.image.layer.nondistributable.v1.tar'), + ('application/vnd.oci.image.layer.nondistributable.v1.tar+gzip'), + ('application/vnd.oci.image.config.v1+json'), + ('application/vnd.oci.image.manifest.v1+json'), + ('application/vnd.oci.image.index.v1+json'), + ('application/vnd.cncf.helm.config.v1+json'), + ('application/tar+gzip'), + ('application/octet-stream'), + ('application/vnd.buildkit.cacheconfig.v0'), + ('application/vnd.cncf.helm.chart.content.v1.tar+gzip'), + ('application/vnd.cncf.helm.chart.provenance.v1.prov'); + + +CREATE TABLE gc_blob_review_queue +( + blob_id INTEGER NOT NULL, + review_after BIGINT NOT NULL DEFAULT (EXTRACT(EPOCH FROM (NOW() + INTERVAL '1 day'))), + review_count INTEGER NOT NULL DEFAULT 0, + created_at BIGINT NOT NULL DEFAULT EXTRACT(EPOCH FROM NOW()), + event text NOT NULL, + CONSTRAINT pk_gc_blob_review_queue primary key (blob_id) +); + +CREATE INDEX index_gc_blob_review_queue_on_review_after ON gc_blob_review_queue USING btree (review_after); + +CREATE TABLE gc_review_after_defaults +( + event text NOT NULL, + value interval NOT NULL, + CONSTRAINT pk_gc_review_after_defaults PRIMARY KEY (event), + CONSTRAINT check_gc_review_after_defaults_event_length CHECK ((char_length(event) <= 255)) +); + +INSERT INTO gc_review_after_defaults (event, value) +VALUES ('blob_upload', interval '1 day'), + ('manifest_upload', interval '1 day'), + ('manifest_delete', interval '1 day'), + ('layer_delete', interval '1 day'), + ('manifest_list_delete', interval '1 day'), + ('tag_delete', interval '1 day'), + ('tag_switch', interval '1 day') +ON CONFLICT (event) + DO NOTHING; + +CREATE TABLE gc_manifest_review_queue +( + registry_id INTEGER NOT NULL, + manifest_id INTEGER NOT NULL, + review_after BIGINT NOT NULL DEFAULT (EXTRACT(EPOCH FROM (NOW() + INTERVAL '1 day'))), + review_count INTEGER NOT NULL DEFAULT 0, + created_at BIGINT NOT NULL DEFAULT EXTRACT(EPOCH FROM NOW()), + event text NOT NULL, + CONSTRAINT pk_gc_manifest_review_queue PRIMARY KEY (registry_id, manifest_id), + CONSTRAINT fk_gc_manifest_review_queue_rp_id_mfst_id_mnfsts FOREIGN KEY (manifest_id) REFERENCES manifests (manifest_id) ON DELETE CASCADE +); + +CREATE INDEX index_gc_manifest_review_queue_on_review_after ON gc_manifest_review_queue USING btree (review_after); + +CREATE OR REPLACE FUNCTION gc_review_after(e text) + RETURNS BIGINT + VOLATILE +AS +$$ +DECLARE + result timestamp WITH time zone; +BEGIN + SELECT (now() + value) + INTO result + FROM gc_review_after_defaults + WHERE event = e; + IF result IS NULL THEN + RETURN EXTRACT(EPOCH FROM (now() + interval '1 day')); + ELSE + RETURN EXTRACT(EPOCH FROM result); + END IF; +END; +$$ + LANGUAGE plpgsql; + +CREATE OR REPLACE FUNCTION gc_track_blob_uploads() + RETURNS TRIGGER +AS +$$ +BEGIN + INSERT INTO gc_blob_review_queue (blob_id, review_after, event) + VALUES (NEW.blob_id, gc_review_after('blob_upload'), 'blob_upload') + ON CONFLICT (blob_id) + DO UPDATE SET review_after = gc_review_after('blob_upload'), + event = 'blob_upload'; + RETURN NULL; +END; +$$ + LANGUAGE plpgsql; + +CREATE TRIGGER 
gc_track_blob_uploads_trigger + AFTER INSERT + ON blobs + FOR EACH ROW +EXECUTE PROCEDURE public.gc_track_blob_uploads(); + +CREATE OR REPLACE FUNCTION gc_track_manifest_uploads() + RETURNS TRIGGER +AS +$$ +BEGIN + INSERT INTO gc_manifest_review_queue (registry_id, manifest_id, review_after, event) + VALUES (NEW.manifest_registry_id, NEW.manifest_id, gc_review_after('manifest_upload'), 'manifest_upload'); + RETURN NULL; +END; +$$ + LANGUAGE plpgsql; + +CREATE TRIGGER gc_track_manifest_uploads_trigger + AFTER INSERT + ON manifests + FOR EACH ROW +EXECUTE PROCEDURE gc_track_manifest_uploads(); + +CREATE OR REPLACE FUNCTION gc_track_deleted_manifests() + RETURNS TRIGGER +AS +$$ +BEGIN + IF OLD.manifest_configuration_blob_id IS NOT NULL THEN -- not all manifests have a configuration + INSERT INTO gc_blob_review_queue (blob_id, review_after, event) + VALUES (OLD.manifest_configuration_blob_id, gc_review_after('manifest_delete'), 'manifest_delete') + ON CONFLICT (blob_id) + DO UPDATE SET + review_after = gc_review_after('manifest_delete'), + event = 'manifest_delete'; + END IF; + RETURN NULL; +END; +$$ + LANGUAGE plpgsql; + +CREATE OR REPLACE FUNCTION gc_track_deleted_layers() + RETURNS TRIGGER +AS +$$ +BEGIN + IF (TG_LEVEL = 'STATEMENT') THEN + INSERT INTO gc_blob_review_queue (blob_id, review_after, event) + SELECT deleted_rows.layer_blob_id, + gc_review_after('layer_delete'), + 'layer_delete' + FROM old_table deleted_rows + JOIN + blobs b ON deleted_rows.layer_blob_id = b.blob_id + ORDER BY deleted_rows.layer_blob_id ASC + ON CONFLICT (blob_id) + DO UPDATE SET review_after = gc_review_after('layer_delete'), + event = 'layer_delete'; + ELSIF (TG_LEVEL = 'ROW') THEN + INSERT INTO gc_blob_review_queue (blob_id, review_after, event) + VALUES (OLD.blob_id, gc_review_after('layer_delete'), 'layer_delete') + ON CONFLICT (blob_id) + DO UPDATE SET review_after = gc_review_after('layer_delete'), + event = 'layer_delete'; + END IF; + RETURN NULL; +END; +$$ + LANGUAGE plpgsql; + +CREATE TRIGGER gc_track_deleted_manifests_trigger + AFTER DELETE + ON manifests + FOR EACH ROW +EXECUTE PROCEDURE gc_track_deleted_manifests(); + +CREATE TRIGGER gc_track_deleted_layers_trigger + AFTER DELETE + ON layers + REFERENCING OLD TABLE AS old_table + FOR EACH STATEMENT +EXECUTE FUNCTION gc_track_deleted_layers(); + +CREATE OR REPLACE FUNCTION gc_track_deleted_manifest_lists() + RETURNS TRIGGER +AS +$$ +BEGIN + INSERT INTO gc_manifest_review_queue (registry_id, manifest_id, review_after, event) + VALUES (OLD.manifest_ref_registry_id, OLD.manifest_ref_child_id, gc_review_after('manifest_list_delete'), 'manifest_list_delete') + ON CONFLICT (registry_id, manifest_id) + DO UPDATE SET review_after = gc_review_after('manifest_list_delete'), + event = 'manifest_list_delete'; + RETURN NULL; +END; +$$ + LANGUAGE plpgsql; + +CREATE TRIGGER gc_track_deleted_manifest_lists_trigger + AFTER DELETE + ON manifest_references + FOR EACH ROW +EXECUTE PROCEDURE gc_track_deleted_manifest_lists(); + + +CREATE OR REPLACE FUNCTION gc_track_deleted_tags() + RETURNS TRIGGER +AS +$$ +BEGIN + IF EXISTS (SELECT 1 + FROM manifests + WHERE manifest_registry_id = OLD.tag_registry_id + AND manifest_id = OLD.tag_manifest_id) THEN + INSERT INTO gc_manifest_review_queue (registry_id, manifest_id, review_after, event) + VALUES (OLD.tag_registry_id, OLD.tag_manifest_id, gc_review_after('tag_delete'), 'tag_delete') + ON CONFLICT (registry_id, manifest_id) + DO UPDATE SET review_after = gc_review_after('tag_delete'), + event = 'tag_delete'; + END IF; + RETURN NULL;
+END; +$$ + LANGUAGE plpgsql; + +CREATE TRIGGER gc_track_deleted_tag_trigger + AFTER DELETE + ON tags + FOR EACH ROW +EXECUTE PROCEDURE gc_track_deleted_tags(); + +CREATE OR REPLACE FUNCTION gc_track_switched_tags() + RETURNS TRIGGER +AS +$$ +BEGIN + INSERT INTO gc_manifest_review_queue (registry_id, manifest_id, review_after, event) + VALUES (OLD.tag_registry_id, OLD.tag_manifest_id, gc_review_after('tag_switch'), 'tag_switch') + ON CONFLICT (registry_id, manifest_id) + DO UPDATE SET review_after = gc_review_after('tag_switch'), + event = 'tag_switch'; + RETURN NULL; +END; +$$ + LANGUAGE plpgsql; + +CREATE TRIGGER gc_track_switched_tag_trigger + AFTER UPDATE OF tag_manifest_id + ON tags + FOR EACH ROW +EXECUTE PROCEDURE gc_track_switched_tags(); \ No newline at end of file diff --git a/app/store/database/migrate/sqlite/0065_create_ar_tables.down.sql b/app/store/database/migrate/sqlite/0065_create_ar_tables.down.sql new file mode 100644 index 000000000..aa404e008 --- /dev/null +++ b/app/store/database/migrate/sqlite/0065_create_ar_tables.down.sql @@ -0,0 +1,13 @@ +DROP TABLE registries; +DROP TABLE media_types; +DROP TABLE blobs; +DROP TABLE registry_blobs; +DROP TABLE manifests; +DROP TABLE manifest_references; +DROP TABLE layers; +DROP TABLE artifacts; +DROP TABLE artifact_stats; +DROP TABLE tags; +DROP TABLE upstream_proxy_configs; +DROP TABLE cleanup_policies; +DROP TABLE cleanup_policy_prefix_mappings; diff --git a/app/store/database/migrate/sqlite/0065_create_ar_tables.up.sql b/app/store/database/migrate/sqlite/0065_create_ar_tables.up.sql new file mode 100644 index 000000000..4972b3289 --- /dev/null +++ b/app/store/database/migrate/sqlite/0065_create_ar_tables.up.sql @@ -0,0 +1,330 @@ +create table registries +( + registry_id INTEGER PRIMARY KEY AUTOINCREMENT, + registry_name text not null + constraint registry_name_len_check + check (length(registry_name) <= 255), + registry_root_parent_id INTEGER not null, + registry_parent_id INTEGER not null, + registry_description text, + registry_type text not null, + registry_package_type text not null, + registry_upstream_proxies text, + registry_allowed_pattern text, + registry_blocked_pattern text, + registry_labels text, + registry_created_at INTEGER not null, + registry_updated_at INTEGER not null, + registry_created_by INTEGER not null, + registry_updated_by INTEGER not null, + constraint unique_registries + unique (registry_root_parent_id, registry_name) +); + +create table media_types +( + mt_id INTEGER PRIMARY KEY AUTOINCREMENT, + mt_media_type text not null + constraint unique_media_types_type + unique, + mt_created_at INTEGER NOT NULL DEFAULT (strftime('%s', 'now') * 1000) +); + +create table blobs +( + blob_id INTEGER PRIMARY KEY AUTOINCREMENT, + blob_root_parent_id INTEGER not null, + blob_digest bytea not null, + blob_media_type_id INTEGER not null + constraint fk_blobs_media_type_id_media_types + references media_types(mt_id), + blob_size INTEGER not null, + blob_created_at INTEGER not null, + blob_created_by INTEGER not null, + constraint unique_digest_root_parent_id unique (blob_digest, blob_root_parent_id) + ); + +create index index_blobs_on_media_type_id + on blobs (blob_media_type_id); + +create table registry_blobs +( + rblob_id INTEGER PRIMARY KEY AUTOINCREMENT, + rblob_registry_id INTEGER not null + constraint fk_registry_blobs_rpstry_id_registries + references registries(registry_id) + on delete cascade, + rblob_blob_id INTEGER not null + constraint fk_registry_blobs_blob_id_blobs + references blobs(blob_id) + on 
delete cascade, + rblob_image_name text + constraint registry_blobs_image_len_check + check (length(rblob_image_name) <= 255), + rblob_created_at INTEGER not null, + rblob_updated_at INTEGER not null, + rblob_created_by INTEGER not null, + rblob_updated_by INTEGER not null, + + constraint unique_registry_blobs_registry_id_blob_id_image + unique (rblob_registry_id, rblob_blob_id, rblob_image_name) + ); + +create index index_registry_blobs_on_reg_id + on registry_blobs (rblob_registry_id); + +create index index_registry_blobs_on_reg_blob_id + on registry_blobs (rblob_registry_id, rblob_blob_id); + + + +create table manifests +( + manifest_id INTEGER PRIMARY KEY AUTOINCREMENT, + manifest_registry_id INTEGER not null + constraint fk_manifests_registry_id_registries + references registries(registry_id) + on delete cascade, + manifest_schema_version smallint not null, + manifest_media_type_id INTEGER not null + constraint fk_manifests_media_type_id_media_types + references media_types(mt_id), + manifest_artifact_media_type text, + manifest_total_size INTEGER not null, + manifest_configuration_media_type text, + manifest_configuration_payload bytea, + manifest_configuration_blob_id INTEGER + constraint fk_manifests_configuration_blob_id_blobs + references blobs(blob_id), + manifest_configuration_digest bytea, + manifest_digest bytea not null, + manifest_payload bytea not null, + manifest_non_conformant boolean default false, + manifest_non_distributable_layers boolean default false, + manifest_subject_id INTEGER, + manifest_subject_digest bytea, + manifest_annotations bytea, + manifest_image_name text not null + constraint manifests_img_name_len_check + check (length(manifest_image_name) <= 255), + manifest_created_at INTEGER not null, + manifest_created_by INTEGER not null, + manifest_updated_at INTEGER not null, + manifest_updated_by INTEGER not null, + constraint unique_manifests_registry_id_image_name_and_digest + unique (manifest_registry_id, manifest_image_name, manifest_digest), + constraint unique_manifests_registry_id_id_cfg_blob_id + unique (manifest_registry_id, manifest_id, manifest_configuration_blob_id), + constraint fk_manifests_subject_id_manifests + foreign key (manifest_subject_id) references manifests(manifest_id) + on delete cascade + ); + +create index index_manifests_on_media_type_id + on manifests (manifest_media_type_id); + +create index index_manifests_on_configuration_blob_id + on manifests (manifest_configuration_blob_id); + + + +create table manifest_references +( + manifest_ref_id INTEGER PRIMARY KEY AUTOINCREMENT, + manifest_ref_registry_id INTEGER not null, + manifest_ref_parent_id INTEGER not null, + manifest_ref_child_id INTEGER not null, + manifest_ref_created_at INTEGER not null, + manifest_ref_updated_at INTEGER not null, + manifest_ref_created_by INTEGER not null, + manifest_ref_updated_by INTEGER not null, + constraint unique_manifest_references_prt_id_chd_id + unique (manifest_ref_registry_id, manifest_ref_parent_id, manifest_ref_child_id), + constraint fk_manifest_ref_parent_id_manifests_manifest_id + foreign key (manifest_ref_parent_id) references manifests(manifest_id) + on delete cascade, + constraint fk_manifest_ref_child_id_manifests_manifest_id + foreign key (manifest_ref_child_id) references manifests(manifest_id), + constraint check_manifest_references_parent_id_and_child_id_differ + check (manifest_ref_parent_id <> manifest_ref_child_id) + ); + +create index index_manifest_references_on_rpstry_id_child_id + on manifest_references 
(manifest_ref_registry_id, manifest_ref_child_id); + +create table layers +( + layer_id INTEGER PRIMARY KEY AUTOINCREMENT, + layer_registry_id INTEGER not null, + layer_manifest_id INTEGER not null, + layer_media_type_id INTEGER not null + constraint fk_layer_media_type_id_media_types + references media_types(mt_id), + layer_blob_id INTEGER not null + constraint fk_layer_blob_id_blobs + references blobs(blob_id), + layer_size INTEGER not null, + layer_created_at INTEGER not null, + layer_updated_at INTEGER not null, + layer_created_by INTEGER not null, + layer_updated_by INTEGER not null, + constraint unique_layer_rpstry_id_and_mnfst_id_and_blob_id + unique (layer_registry_id, layer_manifest_id, layer_blob_id), + constraint unique_layer_rpstry_id_and_id_and_blob_id + unique (layer_registry_id, layer_id, layer_blob_id), + constraint fk_layer_manifest_id_and_manifests_manifest_id + foreign key (layer_manifest_id) references manifests(manifest_id) + on delete cascade + ); + +create index index_layer_on_media_type_id + on layers (layer_media_type_id); + +create index index_layer_on_blob_id + on layers (layer_blob_id); + +create table artifacts +( + artifact_id INTEGER PRIMARY KEY AUTOINCREMENT, + artifact_name text not null, + artifact_registry_id INTEGER not null + constraint fk_registries_registry_id + references registries(registry_id) + on delete cascade, + artifact_labels text, + artifact_enabled boolean default false, + artifact_created_at INTEGER, + artifact_updated_at INTEGER, + artifact_created_by INTEGER, + artifact_updated_by INTEGER, + constraint unique_artifact_registry_id_and_name unique (artifact_registry_id, artifact_name), + constraint check_artifact_name_length check ((length(artifact_name) <= 255)) + ); + +create index index_artifact_on_registry_id ON artifacts (artifact_registry_id); + + +create table artifact_stats +( + artifact_stat_id INTEGER PRIMARY KEY AUTOINCREMENT, + artifact_stat_artifact_id INTEGER not null + constraint fk_artifacts_artifact_id + references artifacts(artifact_id), + artifact_stat_date INTEGER, + artifact_stat_download_count INTEGER, + artifact_stat_upload_bytes INTEGER, + artifact_stat_download_bytes INTEGER, + artifact_stat_created_at INTEGER not null, + artifact_stat_updated_at INTEGER not null, + artifact_stat_created_by INTEGER not null, + artifact_stat_updated_by INTEGER not null, + constraint unique_artifact_stats_artifact_id_and_date unique (artifact_stat_artifact_id, artifact_stat_date) + ); + +create table tags +( + tag_id INTEGER PRIMARY KEY AUTOINCREMENT, + tag_name text not null + constraint tag_name_len_check + check (length(tag_name) <= 128), + tag_image_name text not null + constraint tag_img_name_len_check + check (length(tag_image_name) <= 255), + tag_registry_id INTEGER not null, + tag_manifest_id INTEGER not null, + tag_created_at INTEGER, + tag_updated_at INTEGER, + tag_created_by INTEGER, + tag_updated_by INTEGER, + constraint fk_tag_manifest_id_and_manifests_manifest_id FOREIGN KEY + (tag_manifest_id) REFERENCES manifests (manifest_id) ON DELETE CASCADE, + constraint unique_tag_registry_id_and_name_and_image_name + unique (tag_registry_id, tag_name, tag_image_name) + ); + +create index index_tag_on_rpository_id_and_manifest_id + on tags (tag_registry_id, tag_manifest_id); + +create table upstream_proxy_configs +( + upstream_proxy_config_id INTEGER PRIMARY KEY AUTOINCREMENT, + upstream_proxy_config_registry_id INTEGER not null + constraint fk_upstream_proxy_config_registry_id + references registries(registry_id) + on delete 
cascade, + upstream_proxy_config_source text, + upstream_proxy_config_url text, + upstream_proxy_config_auth_type text not null, + upstream_proxy_config_user_name text, + upstream_proxy_config_secret_identifier text, + upstream_proxy_config_secret_space_id int, + upstream_proxy_config_token text, + upstream_proxy_config_created_at INTEGER, + upstream_proxy_config_updated_at INTEGER, + upstream_proxy_config_created_by INTEGER, + upstream_proxy_config_updated_by INTEGER, + constraint fk_layers_secret_identifier_and_secret_space_id FOREIGN KEY + (upstream_proxy_config_secret_identifier, upstream_proxy_config_secret_space_id) REFERENCES secrets(secret_uid, secret_space_id) + ON DELETE CASCADE +); + +create index index_upstream_proxy_config_on_registry_id + on upstream_proxy_configs (upstream_proxy_config_registry_id); + +create table cleanup_policies +( + cp_id INTEGER PRIMARY KEY AUTOINCREMENT, + cp_registry_id INTEGER not null + constraint fk_cleanup_policies_registry_id + references registries(registry_id) ON DELETE CASCADE, + cp_name text, + cp_expiry_time_ms INTEGER, + cp_created_at INTEGER not null, + cp_updated_at INTEGER not null, + cp_created_by INTEGER not null, + cp_updated_by INTEGER not null +); + +create index index_cleanup_policies_on_registry_id + on cleanup_policies (cp_registry_id); + +create table cleanup_policy_prefix_mappings +( + cpp_id INTEGER PRIMARY KEY AUTOINCREMENT, + cpp_cleanup_policy_id INTEGER not null + constraint fk_cleanup_policy_prefix_registry_id + references cleanup_policies(cp_id) ON DELETE CASCADE, + cpp_prefix text not null, + cpp_prefix_type text not null +); + +create index index_cleanup_policy_map_on_policy_id + on cleanup_policy_prefix_mappings (cpp_cleanup_policy_id); + + + +insert into media_types (mt_media_type) +values ('application/vnd.docker.distribution.manifest.v1+json'), + ('application/vnd.docker.distribution.manifest.v1+prettyjws'), + ('application/vnd.docker.distribution.manifest.v2+json'), + ('application/vnd.docker.distribution.manifest.list.v2+json'), + ('application/vnd.docker.image.rootfs.diff.tar'), + ('application/vnd.docker.image.rootfs.diff.tar.gzip'), + ('application/vnd.docker.image.rootfs.foreign.diff.tar.gzip'), + ('application/vnd.docker.container.image.v1+json'), + ('application/vnd.docker.container.image.rootfs.diff+x-gtar'), + ('application/vnd.docker.plugin.v1+json'), + ('application/vnd.oci.image.layer.v1.tar'), + ('application/vnd.oci.image.layer.v1.tar+gzip'), + ('application/vnd.oci.image.layer.v1.tar+zstd'), + ('application/vnd.oci.image.layer.nondistributable.v1.tar'), + ('application/vnd.oci.image.layer.nondistributable.v1.tar+gzip'), + ('application/vnd.oci.image.config.v1+json'), + ('application/vnd.oci.image.manifest.v1+json'), + ('application/vnd.oci.image.index.v1+json'), + ('application/vnd.cncf.helm.config.v1+json'), + ('application/tar+gzip'), + ('application/octet-stream'), + ('application/vnd.buildkit.cacheconfig.v0'), + ('application/vnd.cncf.helm.chart.content.v1.tar+gzip'), + ('application/vnd.cncf.helm.chart.provenance.v1.prov');
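Note on the migration above: a registry name is only unique per root parent, enforced by the unique_registries constraint rather than a global unique index. A minimal standalone sketch of that behaviour (illustration only, not part of the patch), run against an in-memory database with the mattn/go-sqlite3 driver that go.mod already pulls in; the table is trimmed to the relevant columns:

package main

import (
	"database/sql"
	"fmt"

	_ "github.com/mattn/go-sqlite3" // registers the "sqlite3" driver; already a go.mod dependency
)

func main() {
	db, err := sql.Open("sqlite3", ":memory:")
	if err != nil {
		panic(err)
	}
	defer db.Close()

	// Trimmed-down registries table carrying only the unique_registries constraint.
	if _, err := db.Exec(`create table registries (
		registry_id INTEGER PRIMARY KEY AUTOINCREMENT,
		registry_name text not null,
		registry_root_parent_id INTEGER not null,
		constraint unique_registries unique (registry_root_parent_id, registry_name))`); err != nil {
		panic(err)
	}

	ins := `insert into registries (registry_name, registry_root_parent_id) values (?, ?)`
	if _, err := db.Exec(ins, "docker-local", 1); err != nil {
		panic(err)
	}
	_, err = db.Exec(ins, "docker-local", 1) // same name under the same root parent: rejected
	fmt.Println(err)                         // UNIQUE constraint failed
	_, err = db.Exec(ins, "docker-local", 2) // same name under a different root parent: allowed
	fmt.Println(err)                         // <nil>
}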
diff --git a/app/store/database/wire.go b/app/store/database/wire.go index b02b4248e..3a0acbcf6 100644 --- a/app/store/database/wire.go +++ b/app/store/database/wire.go @@ -223,14 +223,16 @@ func ProvideTokenStore(db *sqlx.DB) store.TokenStore { } // ProvidePullReqStore provides a pull request store. -func ProvidePullReqStore(db *sqlx.DB, +func ProvidePullReqStore( + db *sqlx.DB, principalInfoCache store.PrincipalInfoCache, ) store.PullReqStore { return NewPullReqStore(db, principalInfoCache) } // ProvidePullReqActivityStore provides a pull request activity store. -func ProvidePullReqActivityStore(db *sqlx.DB, +func ProvidePullReqActivityStore( + db *sqlx.DB, principalInfoCache store.PrincipalInfoCache, ) store.PullReqActivityStore { return NewPullReqActivityStore(db, principalInfoCache) @@ -247,7 +249,8 @@ func ProvidePullReqReviewStore(db *sqlx.DB) store.PullReqReviewStore { } // ProvidePullReqReviewerStore provides a pull request reviewer store. -func ProvidePullReqReviewerStore(db *sqlx.DB, +func ProvidePullReqReviewerStore( + db *sqlx.DB, principalInfoCache store.PrincipalInfoCache, ) store.PullReqReviewerStore { return NewPullReqReviewerStore(db, principalInfoCache) @@ -269,7 +272,8 @@ func ProvideWebhookExecutionStore(db *sqlx.DB) store.WebhookExecutionStore { } // ProvideCheckStore provides a status check result store. -func ProvideCheckStore(db *sqlx.DB, +func ProvideCheckStore( + db *sqlx.DB, principalInfoCache store.PrincipalInfoCache, ) store.CheckStore { return NewCheckStore(db, principalInfoCache) diff --git a/app/token/token.go b/app/token/token.go index 0b7ce4b13..e280e84aa 100644 --- a/app/token/token.go +++ b/app/token/token.go @@ -30,9 +30,22 @@ import ( const ( // userSessionTokenLifeTime is the duration a login / register token is valid. // NOTE: Users can list / delete session tokens via rest API if they want to cleanup earlier. - userSessionTokenLifeTime time.Duration = 30 * 24 * time.Hour // 30 days. + userSessionTokenLifeTime time.Duration = 30 * 24 * time.Hour // 30 days. + sessionTokenWithAccessPermissionsLifeTime time.Duration = 24 * time.Hour // 24 hours. ) +func CreateUserWithAccessPermissions( + user *types.User, + accessPermissions *jwt.SubClaimsAccessPermissions, +) (string, error) { + principal := user.ToPrincipal() + return createWithAccessPermissions( + principal, + ptr.Duration(sessionTokenWithAccessPermissionsLifeTime), + accessPermissions, + ) +} + func CreateUserSession( ctx context.Context, tokenStore store.TokenStore, @@ -128,3 +141,18 @@ func create( return &token, jwtToken, nil } + +func createWithAccessPermissions( + createdFor *types.Principal, + lifetime *time.Duration, + accessPermissions *jwt.SubClaimsAccessPermissions, +) (string, error) { + jwtToken, err := jwt.GenerateForTokenWithAccessPermissions( + createdFor.ID, lifetime, createdFor.Salt, accessPermissions, + ) + if err != nil { + return "", fmt.Errorf("failed to create jwt token: %w", err) + } + + return jwtToken, nil +}
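The token.go change above issues a short-lived (24-hour) JWT whose claims carry explicit access permissions, signed with the principal's salt via the internal jwt.GenerateForTokenWithAccessPermissions helper. A hedged standalone sketch of the same idea, written directly against github.com/golang-jwt/jwt (already a module dependency); the claim name and payload shape below are illustrative, not the actual SubClaimsAccessPermissions layout:

package main

import (
	"fmt"
	"time"

	"github.com/golang-jwt/jwt"
)

func main() {
	// Mirrors sessionTokenWithAccessPermissionsLifeTime from the patch.
	const lifetime = 24 * time.Hour

	claims := jwt.MapClaims{
		"sub": "principal-42",                  // hypothetical subject
		"exp": time.Now().Add(lifetime).Unix(), // token expires after 24 hours
		"accessPermissions": map[string][]string{ // illustrative claim, not the real shape
			"space/registry": {"artifact_pull"},
		},
	}

	token := jwt.NewWithClaims(jwt.SigningMethodHS256, claims)
	signed, err := token.SignedString([]byte("principal-salt")) // the patch signs with createdFor.Salt
	if err != nil {
		panic(err)
	}
	fmt.Println(signed)
}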
diff --git a/app/url/provider.go b/app/url/provider.go index 14915a7b5..cd43f078b 100644 --- a/app/url/provider.go +++ b/app/url/provider.go @@ -74,6 +74,9 @@ type Provider interface { // GetAPIProto returns the proto for the API hostname + + // RegistryURL returns the URL for the OCI token endpoint + RegistryURL() string } // Provider provides the URLs of the gitness system. @@ -99,6 +102,9 @@ type provider struct { // uiURL stores the raw URL to the ui endpoints. + + // registryURL stores the raw URL to the registry endpoints. + registryURL *url.URL } func NewProvider( @@ -110,6 +116,7 @@ func NewProvider( sshDefaultUser string, sshEnabled bool, uiURLRaw string, + registryURLRaw string, ) (Provider, error) { // remove trailing '/' to make usage easier internalURLRaw = strings.TrimRight(internalURLRaw, "/") @@ -118,6 +125,7 @@ func NewProvider( gitURLRaw = strings.TrimRight(gitURLRaw, "/") gitSSHURLRaw = strings.TrimRight(gitSSHURLRaw, "/") uiURLRaw = strings.TrimRight(uiURLRaw, "/") + registryURLRaw = strings.TrimRight(registryURLRaw, "/") internalURL, err := url.Parse(internalURLRaw) if err != nil { @@ -149,6 +157,11 @@ func NewProvider( return nil, fmt.Errorf("provided uiURLRaw '%s' is invalid: %w", uiURLRaw, err) } + registryURL, err := url.Parse(registryURLRaw) + if err != nil { + return nil, fmt.Errorf("provided registryURLRaw '%s' is invalid: %w", registryURLRaw, err) + } + return &provider{ internalURL: internalURL, containerURL: containerURL, @@ -158,6 +171,7 @@ func NewProvider( SSHDefaultUser: sshDefaultUser, SSHEnabled: sshEnabled, uiURL: uiURL, + registryURL: registryURL, }, nil } @@ -191,8 +205,10 @@ func (p *provider) GenerateGITCloneSSHURL(_ context.Context, repoPath string) st } func (p *provider) GenerateUIBuildURL(_ context.Context, repoPath, pipelineIdentifier string, seqNumber int64) string { - return p.uiURL.JoinPath(repoPath, "pipelines", - pipelineIdentifier, "execution", strconv.Itoa(int(seqNumber))).String() + return p.uiURL.JoinPath( + repoPath, "pipelines", + pipelineIdentifier, "execution", strconv.Itoa(int(seqNumber)), + ).String() } func (p *provider) GenerateUIRepoURL(_ context.Context, repoPath string) string { @@ -219,6 +235,10 @@ func (p *provider) GetAPIProto(context.Context) string { return p.apiURL.Scheme } +func (p *provider) RegistryURL() string { + return p.registryURL.String() +} + func BuildGITCloneSSHURL(user string, sshURL *url.URL, repoPath string) string { repoPath = path.Clean(repoPath) if !strings.HasSuffix(repoPath, GITSuffix) { diff --git a/app/url/wire.go b/app/url/wire.go index 6ea88516e..caeb40c59 100644 --- a/app/url/wire.go +++ b/app/url/wire.go @@ -33,5 +33,6 @@ func ProvideURLProvider(config *types.Config) (Provider, error) { config.SSH.DefaultUser, config.SSH.Enable, config.URL.UI, + config.URL.Registry, ) } diff --git a/audit/audit.go b/audit/audit.go index 869120684..4fee4f87a 100644 --- a/audit/audit.go +++ b/audit/audit.go @@ -54,17 +54,22 @@ func (a Action) Validate() error { type ResourceType string const ( - ResourceTypeRepository ResourceType = "repository" - ResourceTypeBranchRule ResourceType = "branch_rule" - ResourceTypeRepositorySettings ResourceType = "repository_settings" + ResourceTypeRepository ResourceType = "repository" + ResourceTypeBranchRule ResourceType = "branch_rule" + ResourceTypeRepositorySettings ResourceType = "repository_settings" + ResourceTypeRegistry ResourceType = "registry" + ResourceTypeRegistryUpstreamProxy ResourceType = "registry_upstream_proxy" ) func (a ResourceType) Validate() error { switch a { case ResourceTypeRepository, ResourceTypeBranchRule, - ResourceTypeRepositorySettings: + ResourceTypeRepositorySettings, + ResourceTypeRegistry, + ResourceTypeRegistryUpstreamProxy: return nil + default: return ErrResourceTypeUndefined }
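The audit.go hunk above extends the closed set of auditable resource types. Because Validate relies on an exhaustive switch with a sentinel error, any value outside the enumerated constants is rejected; a compact standalone sketch of that pattern with the two new registry values (illustration only, trimmed to the new constants):

package main

import (
	"errors"
	"fmt"
)

var ErrResourceTypeUndefined = errors.New("undefined resource type")

type ResourceType string

const (
	ResourceTypeRegistry              ResourceType = "registry"
	ResourceTypeRegistryUpstreamProxy ResourceType = "registry_upstream_proxy"
)

// Validate mirrors the switch-with-default pattern used in audit.go:
// only enumerated values pass; everything else maps to the sentinel error.
func (a ResourceType) Validate() error {
	switch a {
	case ResourceTypeRegistry, ResourceTypeRegistryUpstreamProxy:
		return nil
	default:
		return ErrResourceTypeUndefined
	}
}

func main() {
	fmt.Println(ResourceTypeRegistry.Validate())    // <nil>
	fmt.Println(ResourceType("unknown").Validate()) // undefined resource type
}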
"github.com/harness/gitness/types" +) // RepositoryObject is the object used for emitting repository related audits. // TODO: ensure audit only takes audit related objects? @@ -22,3 +27,18 @@ type RepositoryObject struct { types.Repository IsPublic bool `yaml:"is_public"` } + +type RegistryObject struct { + registrytypes.Registry +} +type RegistryUpstreamProxyConfigObject struct { + ID int64 + RegistryID int64 + Source string + URL string + AuthType string + CreatedAt time.Time + UpdatedAt time.Time + CreatedBy int64 + UpdatedBy int64 +} diff --git a/cli/operations/migrate/current.go b/cli/operations/migrate/current.go index 01ff56c6a..0b92749a4 100644 --- a/cli/operations/migrate/current.go +++ b/cli/operations/migrate/current.go @@ -34,6 +34,7 @@ func (c *commandCurrent) run(*kingpin.ParseContext) error { defer cancel() db, err := getDB(ctx, c.envfile) + if err != nil { return err } diff --git a/cli/operations/server/config.go b/cli/operations/server/config.go index 2cd30d310..677e9178b 100644 --- a/cli/operations/server/config.go +++ b/cli/operations/server/config.go @@ -194,6 +194,9 @@ func backfillURLs(config *types.Config) error { if config.URL.UI == "" { config.URL.UI = baseURL.String() } + if config.URL.Registry == "" { + config.URL.Registry = baseURL.String() + } return nil } @@ -238,22 +241,26 @@ func getSanitizedMachineName() (string, error) { norm.NFD, runes.ReplaceIllFormed(), runes.Remove(runes.In(unicode.Mn)), - runes.Map(func(r rune) rune { - switch { - case 'A' <= r && r <= 'Z': - return r + 32 - case 'a' <= r && r <= 'z': - return r - case '0' <= r && r <= '9': - return r - case r == '-', r == '.': - return r - default: - return '_' - } - }), - norm.NFC), - hostName) + runes.Map( + func(r rune) rune { + switch { + case 'A' <= r && r <= 'Z': + return r + 32 + case 'a' <= r && r <= 'z': + return r + case '0' <= r && r <= '9': + return r + case r == '-', r == '.': + return r + default: + return '_' + } + }, + ), + norm.NFC, + ), + hostName, + ) if err != nil { return "", err } diff --git a/cmd/gitness/wire.go b/cmd/gitness/wire.go index 190bfa660..63fb09883 100644 --- a/cmd/gitness/wire.go +++ b/cmd/gitness/wire.go @@ -1,6 +1,16 @@ -// Copyright 2021 Harness Inc. All rights reserved. -// Use of this source code is governed by the Polyform Free Trial License -// that can be found in the LICENSE.md file for this repository. +// Copyright 2023 Harness, Inc. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
//go:build wireinject // +build wireinject diff --git a/cmd/gitness/wire_gen.go b/cmd/gitness/wire_gen.go index 5d2ff6d27..d40fc2864 100644 --- a/cmd/gitness/wire_gen.go +++ b/cmd/gitness/wire_gen.go @@ -63,7 +63,7 @@ import ( "github.com/harness/gitness/app/pipeline/runner" "github.com/harness/gitness/app/pipeline/scheduler" "github.com/harness/gitness/app/pipeline/triggerer" - "github.com/harness/gitness/app/router" + router2 "github.com/harness/gitness/app/router" server2 "github.com/harness/gitness/app/server" "github.com/harness/gitness/app/services" "github.com/harness/gitness/app/services/aiagent" @@ -113,6 +113,12 @@ import ( "github.com/harness/gitness/livelog" "github.com/harness/gitness/lock" "github.com/harness/gitness/pubsub" + api2 "github.com/harness/gitness/registry/app/api" + "github.com/harness/gitness/registry/app/api/router" + "github.com/harness/gitness/registry/app/pkg" + "github.com/harness/gitness/registry/app/pkg/docker" + database2 "github.com/harness/gitness/registry/app/store/database" + "github.com/harness/gitness/registry/gc" "github.com/harness/gitness/ssh" "github.com/harness/gitness/store/database/dbtx" "github.com/harness/gitness/types" @@ -396,7 +402,36 @@ func initSystem(ctx context.Context, config *types.Config) (*server.System, erro } aiagentController := aiagent2.ProvideController(authorizer, harnessIntelligence, repoStore, pipelineStore, executionStore) openapiService := openapi.ProvideOpenAPIService() - routerRouter := router.ProvideRouter(ctx, config, authenticator, repoController, reposettingsController, executionController, logsController, spaceController, pipelineController, secretController, triggerController, connectorController, templateController, pluginController, pullreqController, webhookController, githookController, gitInterface, serviceaccountController, controller, principalController, checkController, systemController, uploadController, keywordsearchController, infraproviderController, gitspaceController, migrateController, aiagentController, capabilitiesController, provider, openapiService) + storageDriver, err := api2.BlobStorageProvider(config) + if err != nil { + return nil, err + } + storageDeleter := gc.StorageDeleterProvider(storageDriver) + mediaTypesRepository := database2.ProvideMediaTypeDao(db) + blobRepository := database2.ProvideBlobDao(db, mediaTypesRepository) + storageService := docker.StorageServiceProvider(config, storageDriver) + manifestRepository := database2.ProvideManifestDao(db, mediaTypesRepository) + gcService := gc.ServiceProvider() + app := docker.NewApp(ctx, db, storageDeleter, blobRepository, spaceStore, config, storageService, mediaTypesRepository, manifestRepository, gcService) + registryRepository := database2.ProvideRepoDao(db, mediaTypesRepository) + manifestReferenceRepository := database2.ProvideManifestRefDao(db) + tagRepository := database2.ProvideTagDao(db) + artifactRepository := database2.ProvideArtifactDao(db) + artifactStatRepository := database2.ProvideArtifactStatDao(db) + layerRepository := database2.ProvideLayerDao(db, mediaTypesRepository) + manifestService := docker.ManifestServiceProvider(registryRepository, manifestRepository, blobRepository, mediaTypesRepository, manifestReferenceRepository, tagRepository, artifactRepository, artifactStatRepository, layerRepository, gcService, transactor) + registryBlobRepository := database2.ProvideRegistryBlobDao(db) + localRegistry := docker.LocalRegistryProvider(app, manifestService, blobRepository, registryRepository, manifestRepository, 
registryBlobRepository, mediaTypesRepository, tagRepository, artifactRepository, artifactStatRepository, gcService, transactor) + upstreamProxyConfigRepository := database2.ProvideUpstreamDao(db, registryRepository) + remoteRegistry := docker.RemoteRegistryProvider(localRegistry, app, upstreamProxyConfigRepository, secretStore, encrypter) + coreController := pkg.CoreControllerProvider(registryRepository) + dockerController := docker.ControllerProvider(localRegistry, remoteRegistry, coreController, spaceStore, authorizer) + handler := api2.NewHandlerProvider(dockerController, spaceStore, tokenStore, controller, authenticator, provider, authorizer) + registryOCIHandler := router.OCIHandlerProvider(handler) + cleanupPolicyRepository := database2.ProvideCleanupPolicyDao(db, transactor) + apiHandler := router.APIHandlerProvider(registryRepository, upstreamProxyConfigRepository, tagRepository, manifestRepository, cleanupPolicyRepository, artifactRepository, storageDriver, spaceStore, transactor, authenticator, provider, authorizer, auditService) + appRouter := router.AppRouterProvider(registryOCIHandler, apiHandler) + routerRouter := router2.ProvideRouter(ctx, config, authenticator, repoController, reposettingsController, executionController, logsController, spaceController, pipelineController, secretController, triggerController, connectorController, templateController, pluginController, pullreqController, webhookController, githookController, gitInterface, serviceaccountController, controller, principalController, checkController, systemController, uploadController, keywordsearchController, infraproviderController, gitspaceController, migrateController, aiagentController, capabilitiesController, provider, openapiService, appRouter) serverServer := server2.ProvideServer(config, routerRouter) publickeyService := publickey.ProvidePublicKey(publicKeyStore, principalInfoCache) sshServer := ssh.ProvideServer(config, publickeyService, repoController) diff --git a/go.mod b/go.mod index a089787c2..8977cb618 100644 --- a/go.mod +++ b/go.mod @@ -10,6 +10,8 @@ require ( github.com/bmatcuk/doublestar/v4 v4.6.1 github.com/coreos/go-semver v0.3.1 github.com/dchest/uniuri v1.2.0 + github.com/distribution/distribution/v3 v3.0.0-alpha.1 + github.com/distribution/reference v0.6.0 github.com/docker/docker v27.1.1+incompatible github.com/docker/go-connections v0.5.0 github.com/drone-runners/drone-runner-docker v1.8.4-0.20240815103043-c6c3a3e33ce3 @@ -21,23 +23,30 @@ require ( github.com/drone/go-scm v1.38.4 github.com/drone/runner-go v1.12.0 github.com/drone/spec v0.0.0-20230920145636-3827abdce961 + github.com/dustin/go-humanize v1.0.1 github.com/fatih/color v1.17.0 github.com/gabriel-vasile/mimetype v1.4.4 + github.com/getkin/kin-openapi v0.123.0 github.com/gliderlabs/ssh v0.3.7 github.com/go-chi/chi v1.5.5 + github.com/go-chi/chi/v5 v5.0.12 github.com/go-chi/cors v1.2.1 github.com/go-redis/redis/v8 v8.11.5 github.com/go-redsync/redsync/v4 v4.13.0 github.com/golang-jwt/jwt v3.2.2+incompatible + github.com/golang-migrate/migrate/v4 v4.17.1 github.com/google/go-cmp v0.6.0 github.com/google/go-jsonnet v0.20.0 github.com/google/uuid v1.6.0 github.com/google/wire v0.6.0 github.com/gorhill/cronexpr v0.0.0-20180427100037-88b0669f7d75 + github.com/gorilla/mux v1.8.1 github.com/gotidy/ptr v1.4.0 github.com/guregu/null v4.0.0+incompatible github.com/harness/harness-migrate v0.21.1-0.20240804180936-b1de602aa8e7 github.com/hashicorp/go-multierror v1.1.1 + github.com/jackc/pgerrcode v0.0.0-20240316143900-6e2875d9b438 + 
github.com/jackc/pgx/v5 v5.5.5 github.com/jmoiron/sqlx v1.4.0 github.com/joho/godotenv v1.5.1 github.com/kelseyhightower/envconfig v1.4.0 @@ -47,6 +56,9 @@ require ( github.com/matoous/go-nanoid/v2 v2.1.0 github.com/mattn/go-isatty v0.0.20 github.com/mattn/go-sqlite3 v1.14.22 + github.com/oapi-codegen/runtime v1.1.1 + github.com/opencontainers/go-digest v1.0.0 + github.com/opencontainers/image-spec v1.1.0 github.com/pkg/errors v0.9.1 github.com/rs/xid v1.5.0 github.com/rs/zerolog v1.33.0 @@ -55,6 +67,8 @@ require ( github.com/stretchr/testify v1.9.0 github.com/swaggest/openapi-go v0.2.23 github.com/swaggest/swgui v1.8.1 + github.com/swaggo/http-swagger v1.3.4 + github.com/swaggo/swag v1.16.2 github.com/unrolled/secure v1.15.0 github.com/zricethezav/gitleaks/v8 v8.18.5-0.20240614204812-26f34692fac6 go.starlark.net v0.0.0-20231121155337-90ade8b19d09 @@ -78,19 +92,18 @@ require ( cloud.google.com/go/iam v1.1.12 // indirect dario.cat/mergo v1.0.0 // indirect github.com/99designs/httpsignatures-go v0.0.0-20170731043157-88528bf4ca7e // indirect - github.com/Azure/go-ansiterm v0.0.0-20230124172434-306776ec8161 // indirect github.com/BobuSumisu/aho-corasick v1.0.3 // indirect + github.com/KyleBanks/depth v1.2.1 // indirect github.com/anmitsu/go-shlex v0.0.0-20200514113438-38f4b401e2be // indirect github.com/antonmedv/expr v1.15.5 // indirect + github.com/apapsch/go-jsonmerge/v2 v2.0.0 // indirect github.com/aymanbagabas/go-osc52/v2 v2.0.1 // indirect github.com/beorn7/perks v1.0.1 // indirect github.com/bmatcuk/doublestar v1.3.4 // indirect github.com/buildkite/yaml v2.1.0+incompatible // indirect - github.com/cenkalti/backoff/v4 v4.2.0 // indirect github.com/charmbracelet/lipgloss v0.12.1 // indirect github.com/charmbracelet/x/ansi v0.1.4 // indirect - github.com/distribution/reference v0.6.0 // indirect - github.com/docker/distribution v2.7.1+incompatible // indirect + github.com/docker/distribution v2.8.2+incompatible // indirect github.com/docker/go-units v0.5.0 // indirect github.com/drone/envsubst v1.0.3 // indirect github.com/fatih/semgroup v1.2.0 // indirect @@ -99,6 +112,10 @@ require ( github.com/ghodss/yaml v1.0.0 // indirect github.com/gitleaks/go-gitdiff v0.9.0 // indirect github.com/go-logr/stdr v1.2.2 // indirect + github.com/go-openapi/jsonpointer v0.20.2 // indirect + github.com/go-openapi/jsonreference v0.20.0 // indirect + github.com/go-openapi/spec v0.20.9 // indirect + github.com/go-openapi/swag v0.22.8 // indirect github.com/gogo/protobuf v1.3.2 // indirect github.com/golang/groupcache v0.0.0-20210331224755-41bb18bfe9da // indirect github.com/google/s2a-go v0.1.8 // indirect @@ -106,26 +123,29 @@ require ( github.com/googleapis/gax-go/v2 v2.13.0 // indirect github.com/h2non/filetype v1.1.3 // indirect github.com/hashicorp/hcl v1.0.0 // indirect - github.com/jackc/pgx/v4 v4.12.0 // indirect + github.com/invopop/yaml v0.2.0 // indirect + github.com/jackc/pgpassfile v1.0.0 // indirect + github.com/jackc/pgservicefile v0.0.0-20231201235250-de7065d80cb9 // indirect github.com/jmespath/go-jmespath v0.4.0 // indirect + github.com/josharian/intern v1.0.0 // indirect github.com/lucasb-eyer/go-colorful v1.2.0 // indirect github.com/magiconair/properties v1.8.7 // indirect + github.com/mailru/easyjson v0.7.7 // indirect github.com/mattn/go-runewidth v0.0.16 // indirect github.com/mitchellh/mapstructure v1.5.0 // indirect github.com/moby/docker-image-spec v1.3.1 // indirect + github.com/mohae/deepcopy v0.0.0-20170929034955-c48cc78d4826 // indirect github.com/muesli/termenv v0.15.2 // 
indirect github.com/munnerz/goautoneg v0.0.0-20191010083416-a7dc8b61c822 // indirect github.com/natessilva/dag v0.0.0-20180124060714-7194b8dcc5c4 // indirect github.com/onsi/gomega v1.27.10 // indirect - github.com/opencontainers/go-digest v1.0.0 // indirect - github.com/opencontainers/image-spec v1.1.0 // indirect github.com/pelletier/go-toml/v2 v2.2.2 // indirect + github.com/perimeterx/marshmallow v1.1.5 // indirect github.com/prometheus/client_golang v1.19.1 // indirect github.com/prometheus/client_model v0.6.1 // indirect github.com/prometheus/common v0.55.0 // indirect github.com/prometheus/procfs v0.15.1 // indirect github.com/rivo/uniseg v0.4.7 // indirect - github.com/rogpeppe/go-internal v1.11.0 // indirect github.com/sagikazarmark/locafero v0.6.0 // indirect github.com/sagikazarmark/slog-shim v0.1.0 // indirect github.com/sourcegraph/conc v0.3.0 // indirect @@ -134,14 +154,14 @@ require ( github.com/spf13/pflag v1.0.5 // indirect github.com/spf13/viper v1.19.0 // indirect github.com/subosito/gotenv v1.6.0 // indirect + github.com/swaggo/files v0.0.0-20220728132757-551d4a08d97a // indirect go.opencensus.io v0.24.0 // indirect go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc v0.49.0 // indirect go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.51.0 // indirect go.opentelemetry.io/otel v1.28.0 // indirect - go.opentelemetry.io/otel/exporters/otlp/otlptrace v1.19.0 // indirect go.opentelemetry.io/otel/metric v1.28.0 // indirect go.opentelemetry.io/otel/trace v1.28.0 // indirect - go.opentelemetry.io/proto/otlp v1.0.0 // indirect + go.uber.org/atomic v1.10.0 // indirect golang.org/x/time v0.5.0 // indirect google.golang.org/genproto/googleapis/api v0.0.0-20240723171418-e6d459c13d2a // indirect google.golang.org/genproto/googleapis/rpc v0.0.0-20240723171418-e6d459c13d2a // indirect @@ -180,10 +200,12 @@ require ( github.com/vearutop/statigz v1.4.0 // indirect github.com/yuin/goldmark v1.4.13 golang.org/x/mod v0.19.0 // indirect - golang.org/x/net v0.27.0 // indirect + golang.org/x/net v0.27.0 golang.org/x/sys v0.22.0 // indirect golang.org/x/tools v0.23.0 // indirect google.golang.org/genproto v0.0.0-20240722135656-d784300faade // indirect gopkg.in/yaml.v2 v2.4.0 // indirect gopkg.in/yaml.v3 v3.0.1 ) + +replace github.com/harness/gitness/registry => ./registry diff --git a/go.sum b/go.sum index 1d3ff140c..398899cc0 100644 --- a/go.sum +++ b/go.sum @@ -30,12 +30,15 @@ github.com/BobuSumisu/aho-corasick v1.0.3 h1:uuf+JHwU9CHP2Vx+wAy6jcksJThhJS9ehR8 github.com/BobuSumisu/aho-corasick v1.0.3/go.mod h1:hm4jLcvZKI2vRF2WDU1N4p/jpWtpOzp3nLmi9AzX/XE= github.com/BurntSushi/toml v0.3.1/go.mod h1:xHWCNGjB5oqiDr8zfno3MHue2Ht5sIBksp03qcyfWMU= github.com/Knetic/govaluate v3.0.1-0.20171022003610-9aa49832a739+incompatible/go.mod h1:r7JcOSlj0wfOMncg0iLm8Leh48TZaKVeNIfJntJ2wa0= +github.com/KyleBanks/depth v1.2.1 h1:5h8fQADFrWtarTdtDudMmGsC7GPbOAu6RVB3ffsVFHc= +github.com/KyleBanks/depth v1.2.1/go.mod h1:jzSb9d0L43HxTQfT+oSA1EEp2q+ne2uh6XgeJcm8brE= github.com/Masterminds/semver/v3 v3.1.1/go.mod h1:VPu/7SZ7ePZ3QOrcuXROw5FAcLl4a0cBrbBpGY/8hQs= github.com/Masterminds/squirrel v1.5.4 h1:uUcX/aBc8O7Fg9kaISIUsHXdKuqehiXAMQTYX8afzqM= github.com/Masterminds/squirrel v1.5.4/go.mod h1:NNaOrjSoIDfDA40n7sr2tPNZRfjzjA400rg+riTZj10= github.com/Microsoft/go-winio v0.4.11/go.mod h1:VhR8bwka0BXejwEJY73c50VrPtXAaKcyvVC4A4RozmA= github.com/Microsoft/go-winio v0.6.1 h1:9/kr64B9VUZrLm5YYwbGtUJnMgqWVOdUAXu6Migciow= github.com/Microsoft/go-winio v0.6.1/go.mod 
h1:LRdKpFKfdobln8UmuiYcKPot9D2v6svN5+sAH+4kjUM= +github.com/RaveNoX/go-jsoncommentstrip v1.0.0/go.mod h1:78ihd09MekBnJnxpICcwzCMzGrKSKYe4AqU6PDYYpjk= github.com/Shopify/sarama v1.19.0/go.mod h1:FVkBWblsNy7DGZRfXLU0O9RCGt5g3g3yEuWXgklEdEo= github.com/Shopify/toxiproxy v2.1.4+incompatible/go.mod h1:OXgGpZ6Cli1/URJOF1DMxUHB2q5Ap20/P/eIdh4G0pI= github.com/VividCortex/gohistogram v1.0.0/go.mod h1:Pf5mBqqDxYaXu3hDrrU+w6nw50o/4+TcAqDqk/vUH7g= @@ -57,6 +60,8 @@ github.com/antonmedv/expr v1.15.5 h1:y0Iz3cEwmpRz5/r3w4qQR0MfIqJGdGM1zbhD/v0G5Vg github.com/antonmedv/expr v1.15.5/go.mod h1:0E/6TxnOlRNp81GMzX9QfDPAmHo2Phg00y4JUv1ihsE= github.com/apache/thrift v0.12.0/go.mod h1:cp2SuWMxlEZw2r+iP2GNCdIi4C1qmUzdZFSVb+bacwQ= github.com/apache/thrift v0.13.0/go.mod h1:cp2SuWMxlEZw2r+iP2GNCdIi4C1qmUzdZFSVb+bacwQ= +github.com/apapsch/go-jsonmerge/v2 v2.0.0 h1:axGnT1gRIfimI7gJifB699GoE/oq+F2MU7Dml6nw9rQ= +github.com/apapsch/go-jsonmerge/v2 v2.0.0/go.mod h1:lvDnEdqiQrp0O42VQGgmlKpxL1AP2+08jFMw88y4klk= github.com/armon/circbuf v0.0.0-20150827004946-bbbad097214e/go.mod h1:3U/XgcO3hCbHZ8TKRvWD2dDTCfh9M9ya+I9JpbB7O8o= github.com/armon/go-metrics v0.0.0-20180917152333-f0300d1749da/go.mod h1:Q73ZrmVTwzkszR9V5SSuryQ31EELlFMUz1kKyl939pY= github.com/armon/go-radix v0.0.0-20180808171621-7fddfc383310/go.mod h1:ufUuZ+zHj4x4TnLV4JWEpy2hxWSpsRywHrMgIH9cCH8= @@ -85,9 +90,10 @@ github.com/bool64/shared v0.1.5/go.mod h1:081yz68YC9jeFB3+Bbmno2RFWvGKv1lPKkMP6M github.com/buildkite/yaml v2.1.0+incompatible h1:xirI+ql5GzfikVNDmt+yeiXpf/v1Gt03qXTtT5WXdr8= github.com/buildkite/yaml v2.1.0+incompatible/go.mod h1:UoU8vbcwu1+vjZq01+KrpSeLBgQQIjL/H7Y6KwikUrI= github.com/casbin/casbin/v2 v2.1.2/go.mod h1:YcPU1XXisHhLzuxH9coDNf2FbKpjGlbCg3n9yuLkIJQ= +github.com/cenkalti/backoff v2.2.1+incompatible h1:tNowT99t7UNflLxfYYSlKYsBpXdEet03Pg2g16Swow4= github.com/cenkalti/backoff v2.2.1+incompatible/go.mod h1:90ReRw6GdpyfrHakVjL/QHaoyV4aDUVVkXQJJJ3NXXM= -github.com/cenkalti/backoff/v4 v4.2.0 h1:HN5dHm3WBOgndBH6E8V0q2jIYIR3s9yglV8k/+MN3u4= -github.com/cenkalti/backoff/v4 v4.2.0/go.mod h1:Y3VNntkOUPxTVeUxJ/G5vcM//AlwfmyYozVcomhLiZE= +github.com/cenkalti/backoff/v4 v4.2.1 h1:y4OZtCnogmCPw98Zjyt5a6+QwPLGkiQsYW5oUqylYbM= +github.com/cenkalti/backoff/v4 v4.2.1/go.mod h1:Y3VNntkOUPxTVeUxJ/G5vcM//AlwfmyYozVcomhLiZE= github.com/census-instrumentation/opencensus-proto v0.2.1/go.mod h1:f6KPmirojxKA12rnyqOA5BBL4O983OfeGPqjHWSTneU= github.com/cespare/xxhash/v2 v2.1.1/go.mod h1:VGX0DQ3Q6kWi7AoAeZDth3/j3BFtOZR5XLFGgcrjCOs= github.com/cespare/xxhash/v2 v2.3.0 h1:UL815xU9SqsFlibzuggzjXhog7bL6oX9BbNZnL2UFvs= @@ -115,6 +121,7 @@ github.com/coreos/go-systemd/v22 v22.5.0/go.mod h1:Y58oyj3AT4RCenI/lSvhwexgC+NSV github.com/coreos/pkg v0.0.0-20160727233714-3ac0863d7acf/go.mod h1:E3G3o1h8I7cfcXa63jLwjI0eiQQMgzzUDFVpN/nH/eA= github.com/cpuguy83/go-md2man/v2 v2.0.0-20190314233015-f79a8a8ca69d/go.mod h1:maD7wRr/U5Z6m/iR4s+kqSMx2CaBsrgA7czyZG/E6dU= github.com/creack/pty v1.1.7/go.mod h1:lj5s0c3V2DBrqTV7llrYr5NG6My20zk30Fl46Y7DoTY= +github.com/creack/pty v1.1.9/go.mod h1:oKZEueFk5CKHvIhNR5MUki03XCEU+Q6VDXinZuGJ33E= github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= github.com/davecgh/go-spew v1.1.2-0.20180830191138-d8f796af33cc h1:U9qPSI2PIWSS1VwoXQT9A3Wy9MM3WgvqSxFWenqJduM= @@ -124,6 +131,8 @@ github.com/dchest/uniuri v1.2.0/go.mod h1:fSzm4SLHzNZvWLvWJew423PhAzkpNQYq+uNLq4 github.com/dgrijalva/jwt-go v3.2.0+incompatible/go.mod 
h1:E3ru+11k8xSBh+hMPgOLZmtrrCbhqsmaPHjLKYnJCaQ= github.com/dgryski/go-rendezvous v0.0.0-20200823014737-9f7001d12a5f h1:lO4WD4F/rVNCu3HqELle0jiPLLBs70cWOduZpkS1E78= github.com/dgryski/go-rendezvous v0.0.0-20200823014737-9f7001d12a5f/go.mod h1:cuUVRXasLTGF7a8hSLbxyZXjz+1KgoB3wDUb6vlszIc= +github.com/distribution/distribution/v3 v3.0.0-alpha.1 h1:jn7I1gvjOvmLztH1+1cLiUFud7aeJCIQcgzugtwjyJo= +github.com/distribution/distribution/v3 v3.0.0-alpha.1/go.mod h1:LCp4JZp1ZalYg0W/TN05jarCQu+h4w7xc7ZfQF4Y/cY= github.com/distribution/reference v0.6.0 h1:0IXCQ5g4/QMHHkarYzh5l+u8T3t73zM5QvfrDyIgxBk= github.com/distribution/reference v0.6.0/go.mod h1:BbU0aIcezP1/5jX/8MP0YiH4SdvB5Y4f/wlDRiLyi3E= github.com/djherbis/buffer v1.1.0/go.mod h1:VwN8VdFkMY0DCALdY8o00d3IZ6Amz/UNVMWcSaJT44o= @@ -132,8 +141,9 @@ github.com/djherbis/buffer v1.2.0/go.mod h1:fjnebbZjCUpPinBRD+TDwXSOeNQ7fPQWLfGQ github.com/djherbis/nio/v3 v3.0.1 h1:6wxhnuppteMa6RHA4L81Dq7ThkZH8SwnDzXDYy95vB4= github.com/djherbis/nio/v3 v3.0.1/go.mod h1:Ng4h80pbZFMla1yKzm61cF0tqqilXZYrogmWgZxOcmg= github.com/docker/distribution v0.0.0-20170726174610-edc3ab29cdff/go.mod h1:J2gT2udsDAN96Uj4KfcMRqY0/ypR+oyYUYmja8H+y+w= -github.com/docker/distribution v2.7.1+incompatible h1:a5mlkVzth6W5A4fOsS3D2EO5BUmsJpcB+cRlLU7cSug= github.com/docker/distribution v2.7.1+incompatible/go.mod h1:J2gT2udsDAN96Uj4KfcMRqY0/ypR+oyYUYmja8H+y+w= +github.com/docker/distribution v2.8.2+incompatible h1:T3de5rq0dB1j30rp0sA2rER+m322EBzniBPB6ZIzuh8= +github.com/docker/distribution v2.8.2+incompatible/go.mod h1:J2gT2udsDAN96Uj4KfcMRqY0/ypR+oyYUYmja8H+y+w= github.com/docker/docker v27.1.1+incompatible h1:hO/M4MtV36kzKldqnA37IWhebRA+LnqqcqDja6kVaKY= github.com/docker/docker v27.1.1+incompatible/go.mod h1:eEKB0N0r5NX/I1kEveEz05bcu8tLC/8azJZsviup8Sk= github.com/docker/go-connections v0.3.0/go.mod h1:Gbd7IOopHjR8Iph03tsViu4nIes5XhDvyHbTtUxmeec= @@ -167,6 +177,8 @@ github.com/drone/signal v1.0.0/go.mod h1:S8t92eFT0g4WUgEc/LxG+LCuiskpMNsG0ajAMGn github.com/drone/spec v0.0.0-20230920145636-3827abdce961 h1:aUWrLS2ghyxIpDICpZOV50V1x7JLM3U80UQDQxMKT54= github.com/drone/spec v0.0.0-20230920145636-3827abdce961/go.mod h1:KyQZA9qwuscbbM7yTrtZg25Wammoc5GKwaRem8kDA5k= github.com/dustin/go-humanize v0.0.0-20171111073723-bb3d318650d4/go.mod h1:HtrtbFcZ19U5GC7JDqmcUSB87Iq5E25KnS6fMYU6eOk= +github.com/dustin/go-humanize v1.0.1 h1:GzkhY7T5VNhEkwH0PVJgjz+fX1rhBrR7pRT3mDkpeCY= +github.com/dustin/go-humanize v1.0.1/go.mod h1:Mu1zIs6XwVuF/gI1OepvI0qD18qycQx+mFykh5fBlto= github.com/eapache/go-resiliency v1.1.0/go.mod h1:kFI+JgMyC7bLPUVY133qvEBtVayf5mFgVsvEsIPBvNs= github.com/eapache/go-xerial-snappy v0.0.0-20180814174437-776d5712da21/go.mod h1:+020luEh2TKB4/GOp8oxxtq0Daoen/Cii55CzbTV6DU= github.com/eapache/queue v1.1.0/go.mod h1:6eCeP0CKFpHLu8blIFXhExK/dRa7WDZfr6jVFPTqq+I= @@ -192,6 +204,8 @@ github.com/fsnotify/fsnotify v1.7.0 h1:8JEhPFa5W2WU7YfeZzPNqzMP6Lwt7L2715Ggo0nos github.com/fsnotify/fsnotify v1.7.0/go.mod h1:40Bi/Hjc2AVfZrqy+aj+yEI+/bRxZnMJyTJwOpGvigM= github.com/gabriel-vasile/mimetype v1.4.4 h1:QjV6pZ7/XZ7ryI2KuyeEDE8wnh7fHP9YnQy+R0LnH8I= github.com/gabriel-vasile/mimetype v1.4.4/go.mod h1:JwLei5XPtWdGiMFB5Pjle1oEeoSeEuJfJE+TtfvdB/s= +github.com/getkin/kin-openapi v0.123.0 h1:zIik0mRwFNLyvtXK274Q6ut+dPh6nlxBp0x7mNrPhs8= +github.com/getkin/kin-openapi v0.123.0/go.mod h1:wb1aSZA/iWmorQP9KTAS/phLj/t17B5jT7+fS8ed9NM= github.com/ghodss/yaml v1.0.0 h1:wQHKEahhL6wmXdzwWG11gIVCkOv05bNOh+Rxn0yngAk= github.com/ghodss/yaml v1.0.0/go.mod h1:4dBDuWmgqj2HViK6kFavaiC9ZROes6MMH2rRYeMEF04= github.com/gitleaks/go-gitdiff 
v0.9.0 h1:SHAU2l0ZBEo8g82EeFewhVy81sb7JCxW76oSPtR/Nqg= @@ -200,6 +214,8 @@ github.com/gliderlabs/ssh v0.3.7 h1:iV3Bqi942d9huXnzEF2Mt+CY9gLu8DNM4Obd+8bODRE= github.com/gliderlabs/ssh v0.3.7/go.mod h1:zpHEXBstFnQYtGnB8k8kQLol82umzn/2/snG7alWVD8= github.com/go-chi/chi v1.5.5 h1:vOB/HbEMt9QqBqErz07QehcOKHaWFtuj87tTDVz2qXE= github.com/go-chi/chi v1.5.5/go.mod h1:C9JqLr3tIYjDOZpzn+BCuxY8z8vmca43EeMgyZt7irw= +github.com/go-chi/chi/v5 v5.0.12 h1:9euLV5sTrTNTRUU9POmDUvfxyj6LAABLUcEWO+JJb4s= +github.com/go-chi/chi/v5 v5.0.12/go.mod h1:DslCQbL2OYiznFReuXYUmQ2hGd1aDpCnlMNITLSKoi8= github.com/go-chi/cors v1.2.1 h1:xEC8UT3Rlp2QuWNEr4Fs/c2EAGVKBwy/1vHx3bppil4= github.com/go-chi/cors v1.2.1/go.mod h1:sSbTewc+6wYHBBCW7ytsFSn836hqM7JxpglAy2Vzc58= github.com/go-kit/kit v0.8.0/go.mod h1:xBxKIO96dXMWWy0MnWVtmwkA9/13aqxPnvrjFYMA2as= @@ -215,6 +231,18 @@ github.com/go-logr/stdr v1.2.2 h1:hSWxHoqTgW2S2qGc0LTAI563KZ5YKYRhT3MFKZMbjag= github.com/go-logr/stdr v1.2.2/go.mod h1:mMo/vtBO5dYbehREoey6XUKy/eSumjCCveDpRre4VKE= github.com/go-logr/zerologr v1.2.3 h1:up5N9vcH9Xck3jJkXzgyOxozT14R47IyDODz8LM1KSs= github.com/go-logr/zerologr v1.2.3/go.mod h1:BxwGo7y5zgSHYR1BjbnHPyF/5ZjVKfKxAZANVu6E8Ho= +github.com/go-openapi/jsonpointer v0.19.3/go.mod h1:Pl9vOtqEWErmShwVjC8pYs9cog34VGT37dQOVbmoatg= +github.com/go-openapi/jsonpointer v0.19.5/go.mod h1:Pl9vOtqEWErmShwVjC8pYs9cog34VGT37dQOVbmoatg= +github.com/go-openapi/jsonpointer v0.20.2 h1:mQc3nmndL8ZBzStEo3JYF8wzmeWffDH4VbXz58sAx6Q= +github.com/go-openapi/jsonpointer v0.20.2/go.mod h1:bHen+N0u1KEO3YlmqOjTT9Adn1RfD91Ar825/PuiRVs= +github.com/go-openapi/jsonreference v0.20.0 h1:MYlu0sBgChmCfJxxUKZ8g1cPWFOB37YSZqewK7OKeyA= +github.com/go-openapi/jsonreference v0.20.0/go.mod h1:Ag74Ico3lPc+zR+qjn4XBUmXymS4zJbYVCZmcgkasdo= +github.com/go-openapi/spec v0.20.9 h1:xnlYNQAwKd2VQRRfwTEI0DcK+2cbuvI/0c7jx3gA8/8= +github.com/go-openapi/spec v0.20.9/go.mod h1:2OpW+JddWPrpXSCIX8eOx7lZ5iyuWj3RYR6VaaBKcWA= +github.com/go-openapi/swag v0.19.5/go.mod h1:POnQmlKehdgb5mhVOsnJFsivZCEZ/vjK9gh66Z9tfKk= +github.com/go-openapi/swag v0.19.15/go.mod h1:QYRuS/SOXUCsnplDa677K7+DxSOj6IPNl/eQntq43wQ= +github.com/go-openapi/swag v0.22.8 h1:/9RjDSQ0vbFR+NyjGMkFTsA1IA0fmhKSThmfGZjicbw= +github.com/go-openapi/swag v0.22.8/go.mod h1:6QT22icPLEqAM/z/TChgb4WAveCHF92+2gF0CNjHpPI= github.com/go-redis/redis v6.15.9+incompatible h1:K0pv1D7EQUjfyoMql+r/jZqCLizCGKFlFgcHWWmHQjg= github.com/go-redis/redis v6.15.9+incompatible/go.mod h1:NAIEuMOZ/fxfXJIrKDQDz8wamY7mA7PouImQ2Jvg6kA= github.com/go-redis/redis/v7 v7.4.1 h1:PASvf36gyUpr2zdOUS/9Zqc80GbM+9BDyiJSJDDOrTI= @@ -228,9 +256,10 @@ github.com/go-sql-driver/mysql v1.6.0/go.mod h1:DCzpHaOWr8IXmIStZouvnhqoel9Qv2LB github.com/go-sql-driver/mysql v1.8.1 h1:LedoTUt/eveggdHS9qUFC1EFSa8bU2+1pZjSRpvNJ1Y= github.com/go-sql-driver/mysql v1.8.1/go.mod h1:wEBSXgmK//2ZFJyE+qWnIsVGmvmEKlqwuVSjsCm7DZg= github.com/go-stack/stack v1.8.0/go.mod h1:v0f6uXyyMGvRgIKkXu+yp6POWl0qKG85gN/melR3HDY= +github.com/go-test/deep v1.0.8 h1:TDsG77qcSprGbC6vTN8OuXp5g+J+b5Pcguhf7Zt61VM= +github.com/go-test/deep v1.0.8/go.mod h1:5C2ZWiW0ErCdrYzpqxLbTX7MG14M9iiw8DgHncVwcsE= github.com/godbus/dbus/v5 v5.0.4/go.mod h1:xhWf0FNVPg57R7Z0UbKHbJfkEywrmjJnf7w5xrFpKfA= github.com/gofrs/uuid v3.2.0+incompatible/go.mod h1:b2aQJv3Z4Fp6yNu3cdSllBxTCLRxnplIgP/c0N/04lM= -github.com/gofrs/uuid v4.0.0+incompatible/go.mod h1:b2aQJv3Z4Fp6yNu3cdSllBxTCLRxnplIgP/c0N/04lM= github.com/gogo/googleapis v1.1.0/go.mod h1:gf4bu3Q80BeJ6H1S1vYPm8/ELATdvryBaNFGgqEef3s= github.com/gogo/protobuf v0.0.0-20170307180453-100ba4e88506/go.mod 
h1:r8qH/GZQm5c6nD/R0oafs1akxWv10x8SbQlK7atdtwQ= github.com/gogo/protobuf v1.1.1/go.mod h1:r8qH/GZQm5c6nD/R0oafs1akxWv10x8SbQlK7atdtwQ= @@ -240,6 +269,8 @@ github.com/gogo/protobuf v1.3.2 h1:Ov1cvc58UF3b5XjBnZv7+opcTcQFZebYjWzi34vdm4Q= github.com/gogo/protobuf v1.3.2/go.mod h1:P1XiOD3dCwIKUDQYPy72D8LYyHL2YPYrpS2s69NZV8Q= github.com/golang-jwt/jwt v3.2.2+incompatible h1:IfV12K8xAKAnZqdXVzCZ+TOjboZ2keLg81eXfW3O+oY= github.com/golang-jwt/jwt v3.2.2+incompatible/go.mod h1:8pz2t5EyA70fFQQSrl6XZXzqecmYZeUEB8OUGHkxJ+I= +github.com/golang-migrate/migrate/v4 v4.17.1 h1:4zQ6iqL6t6AiItphxJctQb3cFqWiSpMnX7wLTPnnYO4= +github.com/golang-migrate/migrate/v4 v4.17.1/go.mod h1:m8hinFyWBn0SA4QKHuKh175Pm9wjmxj3S2Mia7dbXzM= github.com/golang/glog v0.0.0-20160126235308-23def4e6c14b/go.mod h1:SBH7ygxi8pfUlaOkMMuAQtPIUF8ecWP5IEl/CR7VP2Q= github.com/golang/groupcache v0.0.0-20160516000752-02826c3e7903/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc= github.com/golang/groupcache v0.0.0-20190702054246-869f871628b6/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc= @@ -305,6 +336,8 @@ github.com/gorhill/cronexpr v0.0.0-20180427100037-88b0669f7d75/go.mod h1:g2644b0 github.com/gorilla/context v1.1.1/go.mod h1:kBGZzfjB9CEq2AlWe17Uuf7NDRt0dE0s8S51q0aT7Yg= github.com/gorilla/mux v1.6.2/go.mod h1:1lud6UwP+6orDFRuTfBEV8e9/aOM/c4fVVCaMa2zaAs= github.com/gorilla/mux v1.7.3/go.mod h1:1lud6UwP+6orDFRuTfBEV8e9/aOM/c4fVVCaMa2zaAs= +github.com/gorilla/mux v1.8.1 h1:TuBL49tXwgrFYWhqrNgrUNEY92u81SPhu7sTdzQEiWY= +github.com/gorilla/mux v1.8.1/go.mod h1:AKf9I4AEqPTmMytcMc0KkNouC66V3BtZ4qD5fmWSiMQ= github.com/gorilla/websocket v0.0.0-20170926233335-4201258b820c/go.mod h1:E7qHFY5m1UJ88s3WnNqhKjPHQ0heANvMoAMk2YaljkQ= github.com/gotidy/ptr v1.4.0 h1:7++suUs+HNHMnyz6/AW3SE+4EnBhupPSQTSI7QNijVc= github.com/gotidy/ptr v1.4.0/go.mod h1:MjRBG6/IETiiZGWI8LrRtISXEji+8b/jigmj2q0mEyM= @@ -359,6 +392,8 @@ github.com/iancoleman/orderedmap v0.2.0/go.mod h1:N0Wam8K1arqPXNWjMo21EXnBPOPp36 github.com/imdario/mergo v0.3.6/go.mod h1:2EnlNZ0deacrJVfApfmtdGgDfMuh/nq6Ok1EcJh5FfA= github.com/inconshreveable/mousetrap v1.0.0/go.mod h1:PxqpIevigyE2G7u3NXJIT2ANytuPF1OarO4DADm73n8= github.com/influxdata/influxdb1-client v0.0.0-20191209144304-8bf82d3c094d/go.mod h1:qj24IKcXYK6Iy9ceXlo3Tc+vtHo9lIhSX5JddghvEPo= +github.com/invopop/yaml v0.2.0 h1:7zky/qH+O0DwAyoobXUqvVBwgBFRxKoQ/3FjcVpjTMY= +github.com/invopop/yaml v0.2.0/go.mod h1:2XuRLgs/ouIrW3XNzuNj7J3Nvu/Dig5MXvbCEdiBN3Q= github.com/jackc/chunkreader v1.0.0 h1:4s39bBR8ByfqH+DKm8rQA3E1LHZWB9XWcrz8fqaZbe0= github.com/jackc/chunkreader v1.0.0/go.mod h1:RT6O25fNZIuasFJRyZ4R/Y2BbhasbmZXF9QQ7T3kePo= github.com/jackc/chunkreader/v2 v2.0.0/go.mod h1:odVSm741yZoC3dpHEUXIqA9tQRhFrgOHwnPIn9lDKlk= @@ -370,14 +405,14 @@ github.com/jackc/pgconn v0.0.0-20190831204454-2fabfa3c18b7/go.mod h1:ZJKsE/KZfsU github.com/jackc/pgconn v1.4.0/go.mod h1:Y2O3ZDF0q4mMacyWV3AstPJpeHXWGEetiFttmq5lahk= github.com/jackc/pgconn v1.5.0/go.mod h1:QeD3lBfpTFe8WUnPZWN5KY/mB8FGMIYRdd8P8Jr0fAI= github.com/jackc/pgconn v1.5.1-0.20200601181101-fa742c524853/go.mod h1:QeD3lBfpTFe8WUnPZWN5KY/mB8FGMIYRdd8P8Jr0fAI= -github.com/jackc/pgconn v1.8.0/go.mod h1:1C2Pb36bGIP9QHGBYCjnyhqu7Rv3sGshaQUvmfGIB/o= github.com/jackc/pgconn v1.8.1/go.mod h1:JV6m6b6jhjdmzchES0drzCcYcAHS1OPD5xu3OZ/lE2g= -github.com/jackc/pgconn v1.9.0 h1:gqibKSTJup/ahCsNKyMZAniPuZEfIqfXFc8FOWVYR+Q= -github.com/jackc/pgconn v1.9.0/go.mod h1:YctiPyvzfU11JFxoXokUOOKQXQmDMoJL9vJzHH8/2JY= +github.com/jackc/pgconn v1.14.3 h1:bVoTr12EGANZz66nZPkMInAV/KHD2TxH9npjXXgiB3w= 
+github.com/jackc/pgconn v1.14.3/go.mod h1:RZbme4uasqzybK2RK5c65VsHxoyaml09lx3tXOcO/VM= +github.com/jackc/pgerrcode v0.0.0-20240316143900-6e2875d9b438 h1:Dj0L5fhJ9F82ZJyVOmBx6msDp/kfd1t9GRfny/mfJA0= +github.com/jackc/pgerrcode v0.0.0-20240316143900-6e2875d9b438/go.mod h1:a/s9Lp5W7n/DD0VrVoyJ00FbP2ytTPDVOivvn2bMlds= github.com/jackc/pgio v1.0.0 h1:g12B9UwVnzGhueNavwioyEEpAmqMe1E/BN9ES+8ovkE= github.com/jackc/pgio v1.0.0/go.mod h1:oP+2QK2wFfUWgr+gxjoBH9KGBb31Eio69xUb0w5bYf8= github.com/jackc/pgmock v0.0.0-20190831213851-13a1b77aafa2/go.mod h1:fGZlG77KXmcq05nJLRkk0+p82V8B8Dw8KN2/V9c/OAE= -github.com/jackc/pgmock v0.0.0-20201204152224-4fe30f7445fd/go.mod h1:hrBW0Enj2AZTNpt/7Y5rr2xe/9Mn757Wtb2xeBzPv2c= github.com/jackc/pgpassfile v1.0.0 h1:/6Hmqy13Ss2zCq62VdNG8tM1wchn8zjSGOBJ6icpsIM= github.com/jackc/pgpassfile v1.0.0/go.mod h1:CEx0iS5ambNFdcRtxPj5JhEz+xB6uRky5eyVu/W2HEg= github.com/jackc/pgproto3 v1.1.0 h1:FYYE4yRw+AgI8wXIinMlNjBbp/UitDJwfj5LqqewP1A= @@ -389,11 +424,12 @@ github.com/jackc/pgproto3/v2 v2.0.0-rc3.0.20190831210041-4c03ce451f29/go.mod h1: github.com/jackc/pgproto3/v2 v2.0.1/go.mod h1:WfJCnwN3HIg9Ish/j3sgWXnAfK8A9Y0bwXYU5xKaEdA= github.com/jackc/pgproto3/v2 v2.0.6/go.mod h1:WfJCnwN3HIg9Ish/j3sgWXnAfK8A9Y0bwXYU5xKaEdA= github.com/jackc/pgproto3/v2 v2.1.0/go.mod h1:WfJCnwN3HIg9Ish/j3sgWXnAfK8A9Y0bwXYU5xKaEdA= -github.com/jackc/pgproto3/v2 v2.1.1 h1:7PQ/4gLoqnl87ZxL7xjO0DR5gYuviDCZxQJsUlFW1eI= -github.com/jackc/pgproto3/v2 v2.1.1/go.mod h1:WfJCnwN3HIg9Ish/j3sgWXnAfK8A9Y0bwXYU5xKaEdA= +github.com/jackc/pgproto3/v2 v2.3.3 h1:1HLSx5H+tXR9pW3in3zaztoEwQYRC9SQaYUHjTSUOag= +github.com/jackc/pgproto3/v2 v2.3.3/go.mod h1:WfJCnwN3HIg9Ish/j3sgWXnAfK8A9Y0bwXYU5xKaEdA= github.com/jackc/pgservicefile v0.0.0-20200307190119-3430c5407db8/go.mod h1:vsD4gTJCa9TptPL8sPkXrLZ+hDuNrZCnj29CQpr4X1E= -github.com/jackc/pgservicefile v0.0.0-20200714003250-2b9c44734f2b h1:C8S2+VttkHFdOOCXJe+YGfa4vHYwlt4Zx+IVXQ97jYg= github.com/jackc/pgservicefile v0.0.0-20200714003250-2b9c44734f2b/go.mod h1:vsD4gTJCa9TptPL8sPkXrLZ+hDuNrZCnj29CQpr4X1E= +github.com/jackc/pgservicefile v0.0.0-20231201235250-de7065d80cb9 h1:L0QtFUgDarD7Fpv9jeVMgy/+Ec0mtnmYuImjTz6dtDA= +github.com/jackc/pgservicefile v0.0.0-20231201235250-de7065d80cb9/go.mod h1:5TJZWKEWniPve33vlWYSoGYefn3gLQRzjfDlhSJ9ZKM= github.com/jackc/pgtype v0.0.0-20190421001408-4ed0de4755e0/go.mod h1:hdSHsc1V01CGwFsrv11mJRHWJ6aifDLfdV3aVjFF0zg= github.com/jackc/pgtype v0.0.0-20190824184912-ab885b375b90/go.mod h1:KcahbBH1nCMSo2DXpzsoWOAfFkdEtEJpPbVLq8eE+mc= github.com/jackc/pgtype v0.0.0-20190828014616-a8802b16cc59/go.mod h1:MWlu30kVJrUS8lot6TQqcg7mtthZ9T0EoIBFiJcmcyw= @@ -401,8 +437,8 @@ github.com/jackc/pgtype v1.2.0/go.mod h1:5m2OfMh1wTK7x+Fk952IDmI4nw3nPrvtQdM0ZT4 github.com/jackc/pgtype v1.3.1-0.20200510190516-8cd94a14c75a/go.mod h1:vaogEUkALtxZMCH411K+tKzNpwzCKU+AnPzBKZ+I+Po= github.com/jackc/pgtype v1.3.1-0.20200606141011-f6355165a91c/go.mod h1:cvk9Bgu/VzJ9/lxTO5R5sf80p0DiucVtN7ZxvaC4GmQ= github.com/jackc/pgtype v1.7.0/go.mod h1:ZnHF+rMePVqDKaOfJVI4Q8IVvAQMryDlDkZnKOI75BE= -github.com/jackc/pgtype v1.8.0 h1:iFVCcVhYlw0PulYCVoguRGm0SE9guIcPcccnLzHj8bA= -github.com/jackc/pgtype v1.8.0/go.mod h1:PqDKcEBtllAtk/2p6z6SHdXW5UB+MhE75tUol2OKexE= +github.com/jackc/pgtype v1.14.0 h1:y+xUdabmyMkJLyApYuPj38mW+aAIqCe5uuBB51rH3Vw= +github.com/jackc/pgtype v1.14.0/go.mod h1:LUMuVrfsFfdKGLw+AFFVv6KtHOFMwRgDDzBt76IqCA4= github.com/jackc/pgx/v4 v4.0.0-20190420224344-cc3461e65d96/go.mod h1:mdxmSJJuR08CZQyj1PVQBHy9XOp5p8/SHH6a0psbY9Y= github.com/jackc/pgx/v4 
v4.0.0-20190421002000-1b8f0016e912/go.mod h1:no/Y67Jkk/9WuGR0JG/JseM9irFbnEPbuWV2EELPNuM= github.com/jackc/pgx/v4 v4.0.0-pre1.0.20190824185557-6972a5742186/go.mod h1:X+GQnOEnf1dqHGpw7JmHqHc1NxDoalibchSk9/RWuDc= @@ -410,8 +446,10 @@ github.com/jackc/pgx/v4 v4.5.0/go.mod h1:EpAKPLdnTorwmPUUsqrPxy5fphV18j9q3wrfRXg github.com/jackc/pgx/v4 v4.6.1-0.20200510190926-94ba730bb1e9/go.mod h1:t3/cdRQl6fOLDxqtlyhe9UWgfIi9R8+8v8GKV5TRA/o= github.com/jackc/pgx/v4 v4.6.1-0.20200606145419-4e5062306904/go.mod h1:ZDaNWkt9sW1JMiNn0kdYBaLelIhw7Pg4qd+Vk6tw7Hg= github.com/jackc/pgx/v4 v4.11.0/go.mod h1:i62xJgdrtVDsnL3U8ekyrQXEwGNTRoG7/8r+CIdYfcc= -github.com/jackc/pgx/v4 v4.12.0 h1:xiP3TdnkwyslWNp77yE5XAPfxAsU9RMFDe0c1SwN8h4= -github.com/jackc/pgx/v4 v4.12.0/go.mod h1:fE547h6VulLPA3kySjfnSG/e2D861g/50JlVUa/ub60= +github.com/jackc/pgx/v4 v4.18.2 h1:xVpYkNR5pk5bMCZGfClbO962UIqVABcAGt7ha1s/FeU= +github.com/jackc/pgx/v4 v4.18.2/go.mod h1:Ey4Oru5tH5sB6tV7hDmfWFahwF15Eb7DNXlRKx2CkVw= +github.com/jackc/pgx/v5 v5.5.5 h1:amBjrZVmksIdNjxGW/IiIMzxMKZFelXbUoPNb+8sjQw= +github.com/jackc/pgx/v5 v5.5.5/go.mod h1:ez9gk+OAat140fv9ErkZDYFWmXLfV+++K0uAOiwgm1A= github.com/jackc/puddle v0.0.0-20190413234325-e4ced69a3a2b/go.mod h1:m4B5Dj62Y0fbyuIc15OsIqK0+JU8nkqQjsgx7dvjSWk= github.com/jackc/puddle v0.0.0-20190608224051-11cab39313c9/go.mod h1:m4B5Dj62Y0fbyuIc15OsIqK0+JU8nkqQjsgx7dvjSWk= github.com/jackc/puddle v1.1.0/go.mod h1:m4B5Dj62Y0fbyuIc15OsIqK0+JU8nkqQjsgx7dvjSWk= @@ -427,11 +465,14 @@ github.com/jmoiron/sqlx v1.4.0/go.mod h1:ZrZ7UsYB/weZdl2Bxg6jCRO9c3YHl8r3ahlKmRT github.com/joho/godotenv v1.5.1 h1:7eLL/+HRGLY0ldzfGMeQkb7vMd0as4CfYvUVzLqw0N0= github.com/joho/godotenv v1.5.1/go.mod h1:f4LDr5Voq0i2e/R5DDNOoa2zzDfwtkZa6DnEwAbqwq4= github.com/jonboulle/clockwork v0.1.0/go.mod h1:Ii8DK3G1RaLaWxj9trq07+26W01tbo22gdxWY5EU2bo= +github.com/josharian/intern v1.0.0 h1:vlS4z54oSdjm0bgjRigI+G1HpF+tI+9rE5LLzOg8HmY= +github.com/josharian/intern v1.0.0/go.mod h1:5DoeVV0s6jJacbCEi61lwdGj/aVlrQvzHFFd8Hwg//Y= github.com/json-iterator/go v1.1.5/go.mod h1:+SdeFBvtyEkXs7REEP0seUULqWtbJapLOCVDaaPEHmU= github.com/json-iterator/go v1.1.6/go.mod h1:+SdeFBvtyEkXs7REEP0seUULqWtbJapLOCVDaaPEHmU= github.com/json-iterator/go v1.1.7/go.mod h1:KdQUCv79m/52Kvf8AW2vK1V8akMuk1QjK/uOdHXbAo4= github.com/json-iterator/go v1.1.8/go.mod h1:KdQUCv79m/52Kvf8AW2vK1V8akMuk1QjK/uOdHXbAo4= github.com/jtolds/gls v4.20.0+incompatible/go.mod h1:QJZ7F/aHp+rZTRtaJ1ow/lLfFfVYBRgL+9YlvaHOwJU= +github.com/juju/gnuflag v0.0.0-20171113085948-2ce1bb71843d/go.mod h1:2PavIy+JPciBPrBUjwbNvtwB6RQlve+hkpll6QSNmOE= github.com/julienschmidt/httprouter v1.2.0/go.mod h1:SYymIcj16QtmaHHD7aYtjjsJG7VTCxuUUipMqKk8s4w= github.com/kelseyhightower/envconfig v1.4.0 h1:Im6hONhd3pLkfDFsbRgu68RDNkGF1r3dvMUtDTo2cv8= github.com/kelseyhightower/envconfig v1.4.0/go.mod h1:cccZRl6mQpaq41TPp5QxidR+Sa3axMbJDNb//FQX6Gg= @@ -457,7 +498,6 @@ github.com/lib/pq v1.0.0/go.mod h1:5WUZQaWbwv1U+lTReE5YruASi9Al49XbQIvNi/34Woo= github.com/lib/pq v1.1.0/go.mod h1:5WUZQaWbwv1U+lTReE5YruASi9Al49XbQIvNi/34Woo= github.com/lib/pq v1.2.0/go.mod h1:5WUZQaWbwv1U+lTReE5YruASi9Al49XbQIvNi/34Woo= github.com/lib/pq v1.3.0/go.mod h1:5WUZQaWbwv1U+lTReE5YruASi9Al49XbQIvNi/34Woo= -github.com/lib/pq v1.10.2/go.mod h1:AlVN5x4E4T544tWzH6hKfbfQvm3HdbOxrmggDNAPY9o= github.com/lib/pq v1.10.9 h1:YXG7RB+JIjhP29X+OtkiDnYaXQwpS4JEWq7dtCCRUEw= github.com/lib/pq v1.10.9/go.mod h1:AlVN5x4E4T544tWzH6hKfbfQvm3HdbOxrmggDNAPY9o= github.com/lightstep/lightstep-tracer-common/golang/gogo v0.0.0-20190605223551-bc2310a04743/go.mod 
h1:qklhhLq1aX+mtWk9cPHPzaBjWImj5ULL6C7HFJtXQMM= @@ -467,6 +507,11 @@ github.com/lucasb-eyer/go-colorful v1.2.0/go.mod h1:R4dSotOR9KMtayYi1e77YzuveK+i github.com/lyft/protoc-gen-validate v0.0.13/go.mod h1:XbGvPuh87YZc5TdIa2/I4pLk0QoUACkjt2znoq26NVQ= github.com/magiconair/properties v1.8.7 h1:IeQXZAiQcpL9mgcAe1Nu6cX9LLw6ExEHKjN0VQdvPDY= github.com/magiconair/properties v1.8.7/go.mod h1:Dhd985XPs7jluiymwWYZ0G4Z61jb3vdS329zhj2hYo0= +github.com/mailru/easyjson v0.0.0-20190614124828-94de47d64c63/go.mod h1:C1wdFJiN94OJF2b5HbByQZoLdCWB1Yqtg26g4irojpc= +github.com/mailru/easyjson v0.0.0-20190626092158-b2ccc519800e/go.mod h1:C1wdFJiN94OJF2b5HbByQZoLdCWB1Yqtg26g4irojpc= +github.com/mailru/easyjson v0.7.6/go.mod h1:xzfreul335JAWq5oZzymOObrkdz5UnU4kGfJJLY9Nlc= +github.com/mailru/easyjson v0.7.7 h1:UGYAvKxe3sBsEDzO8ZeWOSlIQfWFlxbzLZe7hwFURr0= +github.com/mailru/easyjson v0.7.7/go.mod h1:xzfreul335JAWq5oZzymOObrkdz5UnU4kGfJJLY9Nlc= github.com/maragudk/migrate v0.4.3 h1:3NrpSzNdCSSPgN/xwkEduEwqrBIRewSEvtN+mhMS6zc= github.com/maragudk/migrate v0.4.3/go.mod h1:vhmL4s+Xz75KU6DPZWRfqb45YyqjYQfcXliA1DsYzvY= github.com/matoous/go-nanoid v1.5.0 h1:VRorl6uCngneC4oUQqOYtO3S0H5QKFtKuKycFG3euek= @@ -517,6 +562,8 @@ github.com/modern-go/concurrent v0.0.0-20180228061459-e0a39a4cb421/go.mod h1:6dJ github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd/go.mod h1:6dJC0mAP4ikYIbvyc7fijjWJddQyLn8Ig3JB5CqoB9Q= github.com/modern-go/reflect2 v0.0.0-20180701023420-4b7aa43c6742/go.mod h1:bx2lNnkwVCuqBIxFjflWJWanXIb3RllmbCylyMrvgv0= github.com/modern-go/reflect2 v1.0.1/go.mod h1:bx2lNnkwVCuqBIxFjflWJWanXIb3RllmbCylyMrvgv0= +github.com/mohae/deepcopy v0.0.0-20170929034955-c48cc78d4826 h1:RWengNIwukTxcDr9M+97sNutRR1RKhG96O6jWumTTnw= +github.com/mohae/deepcopy v0.0.0-20170929034955-c48cc78d4826/go.mod h1:TaXosZuwdSHYgviHp1DAtfrULt5eUgsSMsZf+YrPgl8= github.com/morikuni/aec v1.0.0 h1:nP9CBfwrvYnBRgY6qfDQkygYDmYwOilePFkwzv4dU8A= github.com/morikuni/aec v1.0.0/go.mod h1:BbKIizmSmc5MMPqRYbxO4ZU0S0+P200+tUnFx7PXmsc= github.com/muesli/termenv v0.15.2 h1:GohcuySI0QmI3wN8Ok9PtKGkgkFIk7y6Vpb5PvrY+Wo= @@ -534,8 +581,11 @@ github.com/nats-io/nkeys v0.1.0/go.mod h1:xpnFELMwJABBLVhffcfd1MZx6VsNRFpEugbxzi github.com/nats-io/nkeys v0.1.3/go.mod h1:xpnFELMwJABBLVhffcfd1MZx6VsNRFpEugbxziKVo7w= github.com/nats-io/nuid v1.0.1/go.mod h1:19wcPz3Ph3q0Jbyiqsd0kePYG7A95tJPxeL+1OSON2c= github.com/nbio/st v0.0.0-20140626010706-e9e8d9816f32/go.mod h1:9wM+0iRr9ahx58uYLpLIr5fm8diHn0JbqRycJi6w0Ms= +github.com/niemeyer/pretty v0.0.0-20200227124842-a10e7caefd8e/go.mod h1:zD1mROLANZcx1PVRCS0qkT7pwLkGfwJo4zjcN/Tysno= github.com/nxadm/tail v1.4.8 h1:nPr65rt6Y5JFSKQO7qToXr7pePgD6Gwiw05lkbyAQTE= github.com/nxadm/tail v1.4.8/go.mod h1:+ncqLTQzXmGhMZNUePPaPqPvBxHAIsmXswZKocGu+AU= +github.com/oapi-codegen/runtime v1.1.1 h1:EXLHh0DXIJnWhdRPN2w4MXAzFyE4CskzhNLUmtpMYro= +github.com/oapi-codegen/runtime v1.1.1/go.mod h1:SK9X900oXmPWilYR5/WKPzt3Kqxn/uS/+lbpREv+eCg= github.com/oklog/oklog v0.3.2/go.mod h1:FCV+B7mhrz4o+ueLpx+KqkyXRGMWOYEvfiXtdGtbWGs= github.com/oklog/run v1.0.0/go.mod h1:dlhp/R75TPv97u0XWUtDeV/lRKWPKSdTuV0TZvrmrQA= github.com/olekukonko/tablewriter v0.0.0-20170122224234-a0225b3f23b5/go.mod h1:vsDQFd/mU46D+Z4whnwzcISnGGzXWMclvtLoiIKAKIo= @@ -567,6 +617,8 @@ github.com/pborman/uuid v1.2.0/go.mod h1:X/NO0urCmaxf9VXbdlT7C2Yzkj2IKimNn4k+gtP github.com/pelletier/go-toml/v2 v2.2.2 h1:aYUidT7k73Pcl9nb2gScu7NSrKCSHIDE89b3+6Wq+LM= github.com/pelletier/go-toml/v2 v2.2.2/go.mod h1:1t835xjRzz80PqgE6HHgN2JOsmgYu/h4qDAS4n929Rs= github.com/performancecopilot/speed 
v3.0.0+incompatible/go.mod h1:/CLtqpZ5gBg1M9iaPbIdPPGyKcA8hKdoy6hAWba7Yac= +github.com/perimeterx/marshmallow v1.1.5 h1:a2LALqQ1BlHM8PZblsDdidgv1mWi1DgC2UmX50IvK2s= +github.com/perimeterx/marshmallow v1.1.5/go.mod h1:dsXbUu8CRzfYP5a87xpp0xq9S3u0Vchtcl8we9tYaXw= github.com/petar/GoLLRB v0.0.0-20130427215148-53be0d36a84c/go.mod h1:HUpKUBZnpzkdx0kD/+Yfuft+uD3zHGtXF/XJB14TUr4= github.com/peterbourgon/diskv v2.0.1+incompatible/go.mod h1:uqqh8zWWbv1HBMNONnaR/tNboyR3/BZd58JJSHlUSCU= github.com/pierrec/lz4 v1.0.2-0.20190131084431-473cd7ce01a1/go.mod h1:3/3N9NVKO0jef7pBehbT1qWhCMrIgbYNnFAZCqQ5LRc= @@ -614,8 +666,8 @@ github.com/rivo/uniseg v0.4.7 h1:WUdvkW8uEhrYfLC4ZzdpI2ztxP1I582+49Oc5Mq64VQ= github.com/rivo/uniseg v0.4.7/go.mod h1:FN3SvrM+Zdj16jyLfmOkMNblXMcoc8DfTHruCPUcx88= github.com/rogpeppe/fastuuid v0.0.0-20150106093220-6724a57986af/go.mod h1:XWv6SoW27p1b0cqNHllgS5HIMJraePCO15w5zCzIWYg= github.com/rogpeppe/go-internal v1.3.0/go.mod h1:M8bDsm7K2OlrFYOpmOWEs/qY81heoFRclV5y23lUDJ4= -github.com/rogpeppe/go-internal v1.11.0 h1:cWPaGQEPrBb5/AsnsZesgZZ9yb1OQ+GOISoDNXVBh4M= -github.com/rogpeppe/go-internal v1.11.0/go.mod h1:ddIwULY96R17DhadqLgMfk9H9tvdUzkipdSkR5nkCZA= +github.com/rogpeppe/go-internal v1.12.0 h1:exVL4IDcn6na9z1rAb56Vxr+CgyK3nn3O+epU5NdKM8= +github.com/rogpeppe/go-internal v1.12.0/go.mod h1:E+RYuTGaKKdloAfM02xzb0FW3Paa99yedzYV+kq4uf4= github.com/rs/xid v1.2.1/go.mod h1:+uKXf+4Djp6Md1KODXJxgGQPKngRmWyn10oCKFzNHOQ= github.com/rs/xid v1.5.0 h1:mKX4bl4iPYJtEIxp6CYiUuLQ/8DYMoz0PUdtGgMFRVc= github.com/rs/xid v1.5.0/go.mod h1:trrq9SKmegXys3aeAKXMUTdJsYXVwGY3RLcfgqegfbg= @@ -639,7 +691,6 @@ github.com/sergi/go-diff v1.3.1 h1:xkr+Oxo4BOQKmkn/B9eMK0g5Kg/983T9DqqPHwYqD+8= github.com/sergi/go-diff v1.3.1/go.mod h1:aMJSSKb2lpPvRNec0+w3fl7LP9IOFzdc9Pa4NFbPK1I= github.com/shopspring/decimal v0.0.0-20180709203117-cd690d0c9e24/go.mod h1:M+9NzErvs504Cn4c5DxATwIqPbtswREoFCre64PpcG4= github.com/shopspring/decimal v0.0.0-20200227202807-02e2044944cc/go.mod h1:DKyhrW/HYNuLGql+MJL6WCR6knT2jwCFRcu2hWCYk4o= -github.com/shopspring/decimal v1.2.0/go.mod h1:DKyhrW/HYNuLGql+MJL6WCR6knT2jwCFRcu2hWCYk4o= github.com/shurcooL/sanitized_anchor_name v1.0.0/go.mod h1:1NzhyTcUVG4SuEtjjoZeVRXNmyL/1OwPU0+IJeTBvfc= github.com/sirupsen/logrus v1.2.0/go.mod h1:LxeOpSwHxABJmUn/MG1IvRgCAasNZTLOkJPxbbu5VWo= github.com/sirupsen/logrus v1.4.1/go.mod h1:ni0Sbl8bgC9z8RoU9G6nDWqqs/fq4eDPysMBDgk/93Q= @@ -663,6 +714,7 @@ github.com/spf13/pflag v1.0.5 h1:iy+VFUOCP1a+8yFto/drg2CJ5u0yRoB7fZw3DKv/JXA= github.com/spf13/pflag v1.0.5/go.mod h1:McXfInJRrz4CZXVZOBLb0bTZqETkiAhM9Iw0y3An2Bg= github.com/spf13/viper v1.19.0 h1:RWq5SEjt8o25SROyN3z2OrDB9l7RPd3lwTWU8EcEdcI= github.com/spf13/viper v1.19.0/go.mod h1:GQUN9bilAbhU/jgc1bKs99f/suXKeUMct8Adx5+Ntkg= +github.com/spkg/bom v0.0.0-20160624110644-59b7046e48ad/go.mod h1:qLr4V1qq6nMqFKkMo8ZTx3f+BZEkzsRUY10Xsm2mwU0= github.com/streadway/amqp v0.0.0-20190404075320-75d898a42a94/go.mod h1:AZpEONHx3DKn8O/DFsRAY58/XVQiIPMTMB1SddzLXVw= github.com/streadway/amqp v0.0.0-20190827072141-edfb9018d271/go.mod h1:AZpEONHx3DKn8O/DFsRAY58/XVQiIPMTMB1SddzLXVw= github.com/streadway/handy v0.0.0-20190108123426-d5acb3125c2a/go.mod h1:qNTQ5P5JnDBl6z3cMAg/SywNDC5ABu5ApDIw6lUbRmI= @@ -676,6 +728,7 @@ github.com/stretchr/testify v1.2.2/go.mod h1:a8OnRcib4nhh0OaRAV+Yts87kKdq0PP7pXf github.com/stretchr/testify v1.3.0/go.mod h1:M5WIy9Dh21IEIfnGCwXGc5bZfKNJtfHm1UVUgZn+9EI= github.com/stretchr/testify v1.4.0/go.mod h1:j7eGeouHqKxXV5pUuKE4zz7dFj8WfuZ+81PSLYec5m4= github.com/stretchr/testify v1.5.1/go.mod 
h1:5W2xD1RspED5o8YsWQXVCued0rvSQ+mT+I5cxcmMvtA= +github.com/stretchr/testify v1.6.1/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg= github.com/stretchr/testify v1.7.0/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg= github.com/stretchr/testify v1.7.1/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg= github.com/stretchr/testify v1.8.0/go.mod h1:yNjHg4UonilssWZ8iaSj1OCr/vHnekPRkoO+kdMU+MU= @@ -697,7 +750,15 @@ github.com/swaggest/refl v1.1.0 h1:a+9a75Kv6ciMozPjVbOfcVTEQe81t2R3emvaD9oGQGc= github.com/swaggest/refl v1.1.0/go.mod h1:g3Qa6ki0A/L2yxiuUpT+cuBURuRaltF5SDQpg1kMZSY= github.com/swaggest/swgui v1.8.1 h1:OLcigpoelY0spbpvp6WvBt0I1z+E9egMQlUeEKya+zU= github.com/swaggest/swgui v1.8.1/go.mod h1:YBaAVAwS3ndfvdtW8A4yWDJpge+W57y+8kW+f/DqZtU= +github.com/swaggo/files v0.0.0-20220728132757-551d4a08d97a h1:kAe4YSu0O0UFn1DowNo2MY5p6xzqtJ/wQ7LZynSvGaY= +github.com/swaggo/files v0.0.0-20220728132757-551d4a08d97a/go.mod h1:lKJPbtWzJ9JhsTN1k1gZgleJWY/cqq0psdoMmaThG3w= +github.com/swaggo/http-swagger v1.3.4 h1:q7t/XLx0n15H1Q9/tk3Y9L4n210XzJF5WtnDX64a5ww= +github.com/swaggo/http-swagger v1.3.4/go.mod h1:9dAh0unqMBAlbp1uE2Uc2mQTxNMU/ha4UbucIg1MFkQ= +github.com/swaggo/swag v1.16.2 h1:28Pp+8DkQoV+HLzLx8RGJZXNGKbFqnuvSbAAtoxiY04= +github.com/swaggo/swag v1.16.2/go.mod h1:6YzXnDcpr0767iOejs318CwYkCQqyGer6BizOg03f+E= github.com/tmc/grpc-websocket-proxy v0.0.0-20170815181823-89b8d40f7ca8/go.mod h1:ncp9v5uamzpCO7NfCPTXjqaC+bZgJeR0sMTm6dMHP7U= +github.com/ugorji/go/codec v1.2.11 h1:BMaWp1Bb6fHwEtbplGBGJ498wD+LKlNSl25MjdZY4dU= +github.com/ugorji/go/codec v1.2.11/go.mod h1:UNopzCgEMSXjBc6AOMqYvWC1ktqTAfzJZUZgYf6w6lg= github.com/unrolled/secure v1.15.0 h1:q7x+pdp8jAHnbzxu6UheP8fRlG/rwYTb8TPuQ3rn9Og= github.com/unrolled/secure v1.15.0/go.mod h1:BmF5hyM6tXczk3MpQkFf1hpKSRqCyhqcbiQtiAF7+40= github.com/urfave/cli v1.20.0/go.mod h1:70zkFmudgCuE/ngEzBv17Jvp/497gISqfk5gWijbERA= @@ -730,8 +791,8 @@ go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.51.0 h1:Xs2Ncz0 go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.51.0/go.mod h1:vy+2G/6NvVMpwGX/NyLqcC41fxepnuKHk16E6IZUcJc= go.opentelemetry.io/otel v1.28.0 h1:/SqNcYk+idO0CxKEUOtKQClMK/MimZihKYMruSMViUo= go.opentelemetry.io/otel v1.28.0/go.mod h1:q68ijF8Fc8CnMHKyzqL6akLO46ePnjkgfIMIjUIX9z4= -go.opentelemetry.io/otel/exporters/otlp/otlptrace v1.19.0 h1:Mne5On7VWdx7omSrSSZvM4Kw7cS7NQkOOmLcgscI51U= -go.opentelemetry.io/otel/exporters/otlp/otlptrace v1.19.0/go.mod h1:IPtUMKL4O3tH5y+iXVyAXqpAwMuzC1IrxVS81rummfE= +go.opentelemetry.io/otel/exporters/otlp/otlptrace v1.21.0 h1:cl5P5/GIfFh4t6xyruOgJP5QiA1pw4fYYdv6nc6CBWw= +go.opentelemetry.io/otel/exporters/otlp/otlptrace v1.21.0/go.mod h1:zgBdWWAu7oEEMC06MMKc5NLbA/1YDXV1sMpSqEeLQLg= go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracehttp v1.26.0 h1:1wp/gyxsuYtuE/JFxsQRtcCDtMrO2qMvlfXALU5wkzI= go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracehttp v1.26.0/go.mod h1:gbTHmghkGgqxMomVQQMur1Nba4M0MQ8AYThXDUjsJ38= go.opentelemetry.io/otel/metric v1.28.0 h1:f0HGvSl1KRAU1DLgLGFjrwVyismPlnuU6JD6bOeuA5Q= @@ -748,6 +809,8 @@ go.uber.org/atomic v1.3.2/go.mod h1:gD2HeocX3+yG+ygLZcrzQJaqmWj9AIm7n08wl/qW/PE= go.uber.org/atomic v1.4.0/go.mod h1:gD2HeocX3+yG+ygLZcrzQJaqmWj9AIm7n08wl/qW/PE= go.uber.org/atomic v1.5.0/go.mod h1:sABNBOSYdrvTF6hTgEIbc7YasKWGhgEQZyfxyTvoXHQ= go.uber.org/atomic v1.6.0/go.mod h1:sABNBOSYdrvTF6hTgEIbc7YasKWGhgEQZyfxyTvoXHQ= +go.uber.org/atomic v1.10.0 h1:9qC72Qh0+3MqyJbAn8YU5xVq1frD8bn3JtD2oXtafVQ= +go.uber.org/atomic v1.10.0/go.mod 
h1:LUxbIzbOniOlMKjJjyPfpl4v+PKK2cNJn91OQbhoJI0= go.uber.org/multierr v1.1.0/go.mod h1:wR5kodmAFQ0UK8QlbwjlSNy0Z68gJhDJUG5sjR94q/0= go.uber.org/multierr v1.3.0/go.mod h1:VgVr7evmIr6uPjLBxg28wmKNXyqE9akIJ5XnfpiKl+4= go.uber.org/multierr v1.5.0/go.mod h1:FeouvMocqHpRaaGuG9EjoKcStLC43Zu/fmqdUMPcKYU= @@ -770,10 +833,8 @@ golang.org/x/crypto v0.0.0-20190911031432-227b76d455e7/go.mod h1:yigFU9vqHzYiE8U golang.org/x/crypto v0.0.0-20191011191535-87dc89f01550/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= golang.org/x/crypto v0.0.0-20200323165209-0ec3e9974c59/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto= golang.org/x/crypto v0.0.0-20200622213623-75b288015ac9/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto= -golang.org/x/crypto v0.0.0-20201203163018-be400aefbc4c/go.mod h1:jdWPYTVW3xRLrWPugEBEK3UY2ZEsg3UU495nc5E+M+I= golang.org/x/crypto v0.0.0-20210322153248-0c34fe9e7dc2/go.mod h1:T9bdIzuCu7OtxOm1hfPfRQxPLYneinmdGuTeoZ9dtd4= golang.org/x/crypto v0.0.0-20210513164829-c07d793c2f9a/go.mod h1:P+XmwS30IXTQdn5tA2iutPOUgjI07+tq3H3K9MVA1s8= -golang.org/x/crypto v0.0.0-20210616213533-5ff15b29337e/go.mod h1:GvvjBRRGRdwPK5ydBHafDWAxML/pGHZbMvKqRZ5+Abc= golang.org/x/crypto v0.0.0-20210921155107-089bfa567519/go.mod h1:GvvjBRRGRdwPK5ydBHafDWAxML/pGHZbMvKqRZ5+Abc= golang.org/x/crypto v0.13.0/go.mod h1:y6Z2r+Rw4iayiXXAIxJIDAJ1zMW4yaTpebo8fPOliYc= golang.org/x/crypto v0.18.0/go.mod h1:R0j02AL6hcrfOiy9T4ZYp/rcWeMxM3L6QYxlOuEG1mg= @@ -818,6 +879,7 @@ golang.org/x/net v0.0.0-20200226121028-0de0cce0169b/go.mod h1:z5CRVTTTmAJ677TzLL golang.org/x/net v0.0.0-20201021035429-f5854403a974/go.mod h1:sp8m0HH+o8qH0wwXwYZr8TS3Oi6o0r6Gce1SSxlDquU= golang.org/x/net v0.0.0-20201110031124-69a78807bb2b/go.mod h1:sp8m0HH+o8qH0wwXwYZr8TS3Oi6o0r6Gce1SSxlDquU= golang.org/x/net v0.0.0-20210226172049-e18ecbb05110/go.mod h1:m0MpNAwzfU5UDzcl9v0D8zg8gWTRqZa9RBIspLL5mdg= +golang.org/x/net v0.0.0-20210805182204-aaa1db679c0d/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y= golang.org/x/net v0.0.0-20220722155237-a158d28d115b/go.mod h1:XRhObCWvk6IyKnWLug+ECip1KBveYUHfp+8e9klMJ9c= golang.org/x/net v0.6.0/go.mod h1:2Tu9+aMcznHK/AK1HMvgo6xiTLG5rD5rZLDS+rp2Bjs= golang.org/x/net v0.10.0/go.mod h1:0qNGK6F8kojg2nk9dLZ2mShWaEBan6FAoqfSigmmuDg= @@ -861,14 +923,13 @@ golang.org/x/sys v0.0.0-20190502145724-3ef323f4f1fd/go.mod h1:h1NjWce9XRLGQEsW7w golang.org/x/sys v0.0.0-20190726091711-fc99dfbffb4e/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20190813064441-fde4db37ae7a/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20190826190057-c7b8b68b1456/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20191026070338-33540a1f6037/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20191220142924-d4481acd189f/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20200116001909-b77594299b42/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20200223170610-d5e6a3e2c0ae/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20200930185726-fdedc70b468f/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20201119102817-f84b799fce68/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20210423082822-04245dca01da/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20210615035016-665e8c7367d1/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= -golang.org/x/sys 
v0.0.0-20210616094352-59db8d763f22/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20220520151302-bc2c85ada10a/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20220715151400-c0bba94af5f8/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20220722155257-8c9f86f7a55f/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= @@ -880,7 +941,6 @@ golang.org/x/sys v0.12.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.16.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA= golang.org/x/sys v0.22.0 h1:RI27ohtqKCnwULzJLqkv897zojh5/DwS/ENaMzUOaWI= golang.org/x/sys v0.22.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA= -golang.org/x/term v0.0.0-20201117132131-f5c789dd3221/go.mod h1:Nr5EML6q2oocZ2LXRh80K7BxOlk5/8JxuGnuhpl+muw= golang.org/x/term v0.0.0-20201126162022-7de9c90e9dd1/go.mod h1:bj7SfCRtBDWHUb9snDiAeCFNEtKQo2Wmx5Cou7ajbmo= golang.org/x/term v0.0.0-20210927222741-03fcf44c2211/go.mod h1:jbD1KX2456YbFQfuXm/mYQcufACuNUgVhRMnK/tPxf8= golang.org/x/term v0.5.0/go.mod h1:jMB1sMXY+tzblOD4FWmEbocvup2/aLOaQEp7JmGp78k= @@ -892,7 +952,6 @@ golang.org/x/term v0.22.0/go.mod h1:F3qCibpT5AMpCRfhfT53vVJwhLtIVHhB9XDjfFvnMI4= golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= golang.org/x/text v0.3.2/go.mod h1:bEr9sfX3Q8Zfm5fL9x+3itogRgK3+ptLWKqgva+5dAk= golang.org/x/text v0.3.3/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ= -golang.org/x/text v0.3.4/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ= golang.org/x/text v0.3.6/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ= golang.org/x/text v0.3.7/go.mod h1:u+2+/6zg+i71rQMx5EYifcz6MCKuco9NR6JIITiCfzQ= golang.org/x/text v0.7.0/go.mod h1:mrYo+phRRbMaCq/xk9113O4dZlRixOauAjOtrjsXDZ8= @@ -986,6 +1045,7 @@ gopkg.in/alexcesaro/quotedprintable.v3 v3.0.0-20150716171945-2caba252f4dc h1:2gG gopkg.in/alexcesaro/quotedprintable.v3 v3.0.0-20150716171945-2caba252f4dc/go.mod h1:m7x9LTH6d71AHyAX77c9yqWCCa3UKHcVEj9y7hAtKDk= gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= gopkg.in/check.v1 v1.0.0-20180628173108-788fd7840127/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= +gopkg.in/check.v1 v1.0.0-20200227125254-8fa46927fb4f/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c h1:Hei/4ADfdWqJk1ZMxUNpqntNwaWcugrBjAiHlqqRiVk= gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c/go.mod h1:JHkPIbrfpd72SG/EVd6muEfDQjcINNoR0C8j2r3qZ4Q= gopkg.in/cheggaaa/pb.v1 v1.0.25/go.mod h1:V/YB90LKu/1FcN3WVnfiiE5oMCibMjukxqG/qStrOgw= @@ -1009,6 +1069,8 @@ gopkg.in/yaml.v2 v2.2.8/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= gopkg.in/yaml.v2 v2.4.0 h1:D8xgwECY7CYvx+Y2n4sBz93Jn9JRvxdiyyo8CTfuKaY= gopkg.in/yaml.v2 v2.4.0/go.mod h1:RDklbk79AGWmwhnvt/jBztapEOGDOx6ZbXqjP6csGnQ= gopkg.in/yaml.v3 v3.0.0-20200313102051-9f266ea9e77c/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM= +gopkg.in/yaml.v3 v3.0.0-20200615113413-eeeca48fe776/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM= +gopkg.in/yaml.v3 v3.0.0/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM= gopkg.in/yaml.v3 v3.0.1 h1:fxVm/GzAzEWqLHuvctI91KS9hhNmmWOoWu0XTYJS7CA= gopkg.in/yaml.v3 v3.0.1/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM= gotest.tools/v3 v3.5.1 h1:EENdUnS3pdur5nybKYIh2Vfgc8IUNBjxDPSjtiJcOzU= diff --git a/registry/app/api/controller/metadata/artifact_mapper.go 
b/registry/app/api/controller/metadata/artifact_mapper.go
new file mode 100644
index 000000000..49602139d
--- /dev/null
+++ b/registry/app/api/controller/metadata/artifact_mapper.go
@@ -0,0 +1,280 @@
+// Copyright 2023 Harness, Inc.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package metadata
+
+import (
+    "context"
+    "errors"
+    "path/filepath"
+
+    artifactapi "github.com/harness/gitness/registry/app/api/openapi/contracts/artifact"
+    "github.com/harness/gitness/registry/types"
+
+    "github.com/rs/zerolog/log"
+)
+
+func GetArtifactMetadata(artifacts *[]types.ArtifactMetadata) []artifactapi.ArtifactMetadata {
+    artifactMetadataList := make([]artifactapi.ArtifactMetadata, 0, len(*artifacts))
+    for _, artifact := range *artifacts {
+        artifactMetadata := mapToArtifactMetadata(artifact)
+        artifactMetadataList = append(artifactMetadataList, *artifactMetadata)
+    }
+    return artifactMetadataList
+}
+
+func mapToArtifactMetadata(artifact types.ArtifactMetadata) *artifactapi.ArtifactMetadata {
+    lastModified := GetTimeInMs(artifact.ModifiedAt)
+    packageType := artifact.PackageType
+    return &artifactapi.ArtifactMetadata{
+        RegistryIdentifier: artifact.RepoName,
+        Name: artifact.Name,
+        LatestVersion: artifact.LatestVersion,
+        Labels: &artifact.Labels,
+        LastModified: &lastModified,
+        PackageType: &packageType,
+        DownloadsCount: &artifact.DownloadCount,
+    }
+}
+
+func toPackageType(packageTypeStr string) (artifactapi.PackageType, error) {
+    switch packageTypeStr {
+    case string(artifactapi.PackageTypeDOCKER):
+        return artifactapi.PackageTypeDOCKER, nil
+    case string(artifactapi.PackageTypeGENERIC):
+        return artifactapi.PackageTypeGENERIC, nil
+    case string(artifactapi.PackageTypeHELM):
+        return artifactapi.PackageTypeHELM, nil
+    case string(artifactapi.PackageTypeMAVEN):
+        return artifactapi.PackageTypeMAVEN, nil
+    default:
+        return "", errors.New("invalid package type")
+    }
+}
+
+func GetTagMetadata(
+    ctx context.Context,
+    tags *[]types.TagMetadata,
+    latestTag string,
+    image string,
+    regIdentifier string,
+    rootIdentifier string,
+    registryURL string,
+) []artifactapi.ArtifactVersionMetadata {
+    artifactVersionMetadataList := []artifactapi.ArtifactVersionMetadata{}
+    digestCount := int64(1)
+    for _, tag := range *tags {
+        modifiedAt := GetTimeInMs(tag.ModifiedAt)
+        size := GetImageSize(tag.Size)
+        isLatestVersion := latestTag == tag.Name
+        command := GetPullCommand(rootIdentifier, regIdentifier, image, tag.Name, string(tag.PackageType), registryURL)
+        packageType, err := toPackageType(string(tag.PackageType))
+        if err != nil {
+            log.Ctx(ctx).Error().Err(err).Msgf("Error converting package type %s", tag.PackageType)
+            continue
+        }
+        artifactVersionMetadata := &artifactapi.ArtifactVersionMetadata{
+            PackageType: &packageType,
+            Name: tag.Name,
+            Size: &size,
+            LastModified: &modifiedAt,
+            DigestCount: &digestCount,
+            IslatestVersion: &isLatestVersion,
+            PullCommand: &command,
+        }
+        artifactVersionMetadataList = append(artifactVersionMetadataList, *artifactVersionMetadata)
+    }
+    return artifactVersionMetadataList
+}
+
+func GetAllArtifactResponse(
+    artifacts *[]types.ArtifactMetadata,
+    count int64,
+    pageNumber int64,
+    pageSize int,
+) *artifactapi.ListArtifactResponseJSONResponse {
+    artifactMetadataList := GetArtifactMetadata(artifacts)
+    pageCount := GetPageCount(count, pageSize)
+    listArtifact := &artifactapi.ListArtifact{
+        ItemCount: &count,
+        PageCount: &pageCount,
+        PageIndex: &pageNumber,
+        PageSize: &pageSize,
+        Artifacts: artifactMetadataList,
+    }
+    response := &artifactapi.ListArtifactResponseJSONResponse{
+        Data: *listArtifact,
+        Status: artifactapi.StatusSUCCESS,
+    }
+    return response
+}
+
+func GetAllArtifactLabelsResponse(
+    artifactLabels *[]string,
+    count int64,
+    pageNumber int64,
+    pageSize int,
+) *artifactapi.ListArtifactLabelResponseJSONResponse {
+    pageCount := GetPageCount(count, pageSize)
+    listArtifactLabels := &artifactapi.ListArtifactLabel{
+        ItemCount: &count,
+        PageCount: &pageCount,
+        PageIndex: &pageNumber,
+        PageSize: &pageSize,
+        Labels: *artifactLabels,
+    }
+    response := &artifactapi.ListArtifactLabelResponseJSONResponse{
+        Data: *listArtifactLabels,
+        Status: artifactapi.StatusSUCCESS,
+    }
+    return response
+}
+
+func GetAllArtifactVersionResponse(
+    ctx context.Context,
+    tags *[]types.TagMetadata,
+    latestTag string,
+    image string,
+    count int64,
+    regInfo *RegistryRequestInfo,
+    pageNumber int64,
+    pageSize int,
+    rootIdentifier string,
+    registryURL string,
+) *artifactapi.ListArtifactVersionResponseJSONResponse {
+    artifactVersionMetadataList := GetTagMetadata(
+        ctx, tags, latestTag, image,
+        regInfo.RegistryIdentifier, rootIdentifier, registryURL,
+    )
+    pageCount := GetPageCount(count, pageSize)
+    listArtifactVersions := &artifactapi.ListArtifactVersion{
+        ItemCount: &count,
+        PageCount: &pageCount,
+        PageIndex: &pageNumber,
+        PageSize: &pageSize,
+        ArtifactVersions: &artifactVersionMetadataList,
+    }
+    response := &artifactapi.ListArtifactVersionResponseJSONResponse{
+        Data: *listArtifactVersions,
+        Status: artifactapi.StatusSUCCESS,
+    }
+    return response
+}
+
+func GetDockerArtifactDetails(
+    registry *types.Registry,
+    tag *types.TagDetail,
+    manifest *types.Manifest,
+    isLatestTag bool,
+    regInfo *RegistryRequestBaseInfo,
+    registryURL string,
+) *artifactapi.DockerArtifactDetailResponseJSONResponse {
+    repoPath := getRepoPath(registry.Name, tag.ImageName, manifest.Digest.String())
+    pullCommand := GetDockerPullCommand(regInfo.rootIdentifier, registry.Name, tag.ImageName, tag.Name, registryURL)
+    createdAt := GetTimeInMs(tag.CreatedAt)
+    modifiedAt := GetTimeInMs(tag.UpdatedAt)
+    size := GetSize(manifest.TotalSize)
+    artifactDetail := &artifactapi.DockerArtifactDetail{
+        ImageName: tag.ImageName,
+        Version: tag.Name,
+        PackageType: registry.PackageType,
+        IsLatestVersion: &isLatestTag,
+        CreatedAt: &createdAt,
+        ModifiedAt: &modifiedAt,
+        RegistryPath: repoPath,
+        PullCommand: &pullCommand,
+        Url: GetTagURL(regInfo.rootIdentifier, tag.ImageName, tag.Name, registry.Name, registryURL),
+        Size: &size,
+    }
+
+    response := &artifactapi.DockerArtifactDetailResponseJSONResponse{
+        Data: *artifactDetail,
+        Status: artifactapi.StatusSUCCESS,
+    }
+    return response
+}
+
+func GetHelmArtifactDetails(
+    registry *types.Registry,
+    tag *types.TagDetail,
+    manifest *types.Manifest,
+    isLatestTag bool,
+    rootIdentifier string,
+    registryURL string,
+) *artifactapi.HelmArtifactDetailResponseJSONResponse {
+    repoPath := getRepoPath(registry.Name, tag.ImageName, manifest.Digest.String())
+    pullCommand := GetHelmPullCommand(rootIdentifier, registry.Name, tag.ImageName, tag.Name, registryURL)
+    createdAt := GetTimeInMs(tag.CreatedAt)
+    modifiedAt := GetTimeInMs(tag.UpdatedAt)
+    size := GetSize(manifest.TotalSize)
+    artifactDetail := &artifactapi.HelmArtifactDetail{
+        Artifact: &tag.ImageName,
+        Version: tag.Name,
+        PackageType: registry.PackageType,
+        IsLatestVersion: &isLatestTag,
+        CreatedAt: &createdAt,
+        ModifiedAt: &modifiedAt,
+        RegistryPath: repoPath,
+        PullCommand: &pullCommand,
+        Url: GetTagURL(rootIdentifier, tag.ImageName, tag.Name, registry.Name, registryURL),
+        Size: &size,
+    }
+
+    response := &artifactapi.HelmArtifactDetailResponseJSONResponse{
+        Data: *artifactDetail,
+        Status: artifactapi.StatusSUCCESS,
+    }
+    return response
+}
+
+func GetArtifactSummary(artifact types.ArtifactMetadata) *artifactapi.ArtifactSummaryResponseJSONResponse {
+    downloads := int64(0)
+    createdAt := GetTimeInMs(artifact.CreatedAt)
+    modifiedAt := GetTimeInMs(artifact.ModifiedAt)
+    artifactVersionSummary := &artifactapi.ArtifactSummary{
+        CreatedAt: &createdAt,
+        ModifiedAt: &modifiedAt,
+        DownloadsCount: &downloads,
+        ImageName: artifact.Name,
+        Labels: &artifact.Labels,
+        PackageType: artifact.PackageType,
+    }
+    response := &artifactapi.ArtifactSummaryResponseJSONResponse{
+        Data: *artifactVersionSummary,
+        Status: artifactapi.StatusSUCCESS,
+    }
+    return response
+}
+
+func GetArtifactVersionSummary(
+    tag *types.TagMetadata,
+    artifactName string,
+    isLatestTag bool,
+) *artifactapi.ArtifactVersionSummaryResponseJSONResponse {
+    artifactVersionSummary := &artifactapi.ArtifactVersionSummary{
+        ImageName: artifactName,
+        IsLatestVersion: &isLatestTag,
+        PackageType: tag.PackageType,
+        Version: tag.Name,
+    }
+    response := &artifactapi.ArtifactVersionSummaryResponseJSONResponse{
+        Data: *artifactVersionSummary,
+        Status: artifactapi.StatusSUCCESS,
+    }
+    return response
+}
+
+func getRepoPath(registry string, image string, tag string) string {
+    return filepath.Join(registry, image, tag)
+}
diff --git a/registry/app/api/controller/metadata/base.go b/registry/app/api/controller/metadata/base.go
new file mode 100644
index 000000000..423bc61a1
--- /dev/null
+++ b/registry/app/api/controller/metadata/base.go
@@ -0,0 +1,349 @@
+// Copyright 2023 Harness, Inc.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
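+
+// NOTE (review sketch, not part of the change): the helpers below accept a
+// registry reference in two shapes. With a hypothetical ref "root/space/my-reg"
+// (assuming paths.DisectLeaf splits it into parent "root/space" and leaf
+// "my-reg"):
+//
+//	info, err := c.GetRegistryRequestBaseInfo(ctx, "", "root/space/my-reg")
+//	// parentRef is derived from regRef; registry fields are resolved.
+//	info, err = c.GetRegistryRequestBaseInfo(ctx, "root/space", "")
+//	// parent-only lookup; registry fields stay zero-valued.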
+
+package metadata
+
+import (
+    "context"
+    "encoding/json"
+    "fmt"
+
+    "github.com/harness/gitness/app/paths"
+    api "github.com/harness/gitness/registry/app/api/openapi/contracts/artifact"
+    storagedriver "github.com/harness/gitness/registry/app/driver"
+    "github.com/harness/gitness/registry/app/pkg/commons"
+    "github.com/harness/gitness/registry/app/storage"
+    "github.com/harness/gitness/registry/types"
+
+    "github.com/opencontainers/go-digest"
+    "github.com/rs/zerolog/log"
+)
+
+const MediaTypeImageConfig = "application/vnd.docker.container.image.v1+json"
+
+var _ api.StrictServerInterface = (*APIController)(nil)
+
+type RegistryRequestBaseInfo struct {
+    rootIdentifier string
+    rootIdentifierID int64
+
+    registryRef string
+    RegistryIdentifier string
+    registryID int64
+
+    parentRef string
+    parentID int64
+}
+
+type RegistryRequestInfo struct {
+    RegistryRequestBaseInfo
+    packageTypes []string
+    sortByField string
+    sortByOrder string
+    offset int
+    limit int
+    pageNumber int64
+    searchTerm string
+    labels []string
+}
+
+// GetRegistryRequestBaseInfo returns the base info for a registry request.
+// Callers must provide either regRef (the parent is derived from it) or
+// parentRef; registry-specific fields are resolved only when regRef is set.
+func (c *APIController) GetRegistryRequestBaseInfo(
+    ctx context.Context,
+    parentRef string,
+    regRef string,
+) (*RegistryRequestBaseInfo, error) {
+    // ---------- CHECKS ------------
+    if commons.IsEmpty(parentRef) && !commons.IsEmpty(regRef) {
+        parentRef, _, _ = paths.DisectLeaf(regRef)
+    }
+
+    // ---------- PARENT ------------
+    if commons.IsEmpty(parentRef) {
+        return nil, fmt.Errorf("parent reference is required")
+    }
+    rootIdentifier, _, err := paths.DisectRoot(parentRef)
+    if err != nil {
+        return nil, fmt.Errorf("invalid parent reference: %w", err)
+    }
+
+    rootSpace, err := c.spaceStore.FindByRef(ctx, rootIdentifier)
+    if err != nil {
+        return nil, fmt.Errorf("root space not found: %w", err)
+    }
+    parentSpace, err := c.spaceStore.FindByRef(ctx, parentRef)
+    if err != nil {
+        return nil, fmt.Errorf("parent space not found: %w", err)
+    }
+    rootIdentifierID := rootSpace.ID
+    parentID := parentSpace.ID
+
+    baseInfo := &RegistryRequestBaseInfo{
+        parentRef: parentRef,
+        parentID: parentID,
+        rootIdentifier: rootIdentifier,
+        rootIdentifierID: rootIdentifierID,
+    }
+
+    // ---------- REGISTRY ------------
+    if !commons.IsEmpty(regRef) {
+        _, regIdentifier, _ := paths.DisectLeaf(regRef)
+
+        reg, getRegistryErr := c.RegistryRepository.GetByParentIDAndName(ctx, parentID, regIdentifier)
+        if getRegistryErr != nil {
+            return nil, fmt.Errorf("registry not found: %w", getRegistryErr)
+        }
+
+        baseInfo.registryRef = regRef
+        baseInfo.RegistryIdentifier = regIdentifier
+        baseInfo.registryID = reg.ID
+    }
+
+    return baseInfo, nil
+}
+
+func (c *APIController) GetRegistryRequestInfo(
+    ctx context.Context,
+    packageTypesParam *api.PackageTypeParam,
+    page *api.PageNumber,
+    size *api.PageSize,
+    search *api.SearchTerm,
+    resource string,
+    parentRef string,
+    regRef string,
+    labelsParam *api.LabelsParam,
+    sortOrder *api.SortOrder,
+    sortField *api.SortField,
+) (*RegistryRequestInfo, error) {
+    packageTypes := []string{}
+    if packageTypesParam != nil {
+        packageTypes = *packageTypesParam
+    }
+    sortByField := ""
+    sortByOrder := ""
+    if sortOrder != nil {
+        sortByOrder = string(*sortOrder)
+    }
+
+    if sortField != nil {
+        sortByField = string(*sortField)
+    }
+
+    labels := []string{}
+
+    if labelsParam != nil {
+        labels = *labelsParam
+    }
+
+    sortByField = GetSortByField(sortByField, resource)
+    sortByOrder = GetSortByOrder(sortByOrder)
+
+    offset := GetOffset(size, page)
+    limit := GetPageLimit(size)
+    pageNumber := GetPageNumber(page)
+
+    searchTerm := ""
+    if search != nil {
+        searchTerm = string(*search)
+    }
+
+    baseInfo, err := c.GetRegistryRequestBaseInfo(ctx, parentRef, regRef)
+    if err != nil {
+        return nil, err
+    }
+
+    return &RegistryRequestInfo{
+        RegistryRequestBaseInfo: *baseInfo,
+        packageTypes: packageTypes,
+        sortByField: sortByField,
+        sortByOrder: sortByOrder,
+        offset: offset,
+        limit: limit,
+        pageNumber: pageNumber,
+        searchTerm: searchTerm,
+        labels: labels,
+    }, nil
+}
+
+func getManifestConfig(
+    ctx context.Context,
+    digest digest.Digest,
+    rootRef string,
+    driver storagedriver.StorageDriver,
+) (*manifestConfig, error) {
+    var config manifestConfig
+    path, err := storage.PathFn(rootRef, digest)
+    if err != nil {
+        return nil, fmt.Errorf("failed to get path: %w", err)
+    }
+
+    content, err := driver.GetContent(ctx, path)
+    if err != nil {
+        return nil, fmt.Errorf("failed to get content for image config: %w", err)
+    }
+    if err := json.Unmarshal(content, &config); err != nil {
+        return nil, fmt.Errorf("failed to unmarshal manifest config: %w", err)
+    }
+
+    return &config, nil
+}
+
+func (c *APIController) setUpstreamProxyIDs(
+    ctx context.Context,
+    registry *types.Registry,
+    dto api.RegistryRequest,
+    parentID int64,
+) error {
+    if dto.Config.Type != api.RegistryTypeVIRTUAL {
+        return fmt.Errorf("invalid call to set upstream proxy ids for parentID: %d", parentID)
+    }
+    virtualConfig, err := dto.Config.AsVirtualConfig()
+    if err != nil {
+        return fmt.Errorf("failed to get virtualConfig: %w", err)
+    }
+    if nil == virtualConfig.UpstreamProxies || commons.IsEmpty(*(virtualConfig.UpstreamProxies)) {
+        log.Ctx(ctx).Debug().Msgf("Nothing to do for registryRequest: %s", dto.Identifier)
+        return nil
+    }
+
+    upstreamProxies, err := c.RegistryRepository.FetchUpstreamProxyIDs(
+        ctx,
+        *virtualConfig.UpstreamProxies,
+        parentID,
+    )
+    if err != nil {
+        return fmt.Errorf("failed to fetch upstream proxy IDs: %w", err)
+    }
+    registry.UpstreamProxies = upstreamProxies
+    return nil
+}
+
+func (c *APIController) getUpstreamProxyKeys(ctx context.Context, ids []int64) []string {
+    repoKeys, _ := c.RegistryRepository.FetchUpstreamProxyKeys(ctx, ids)
+    return repoKeys
+}
+
+type manifestConfig struct {
+    CreatedAt *string `json:"created,omitempty"`
+    Digest string `json:"digest,omitempty"`
+    History []historyEntry `json:"history"`
+    ModifiedAt *string `json:"modified,omitempty"`
+    Os string `json:"os"`
+    Arch string `json:"architecture,omitempty"`
+}
+
+type historyEntry struct {
+    Created string `json:"created"`
+    CreatedBy string `json:"created_by"`
+    EmptyLayer bool `json:"empty_layer"`
+    Comment string `json:"comment,omitempty"`
+}
+
+func getRepoEntityFields(dto api.RegistryRequest) ([]string, []string, string, []string) {
+    allowedPattern := []string{}
+    if dto.AllowedPattern != nil {
+        allowedPattern = *dto.AllowedPattern
+    }
+    blockedPattern := []string{}
+    if dto.BlockedPattern != nil {
+        blockedPattern = *dto.BlockedPattern
+    }
+    description := ""
+    if dto.Description != nil {
+        description = *dto.Description
+    }
+    labels := []string{}
+    if dto.Labels != nil {
+        labels = *dto.Labels
+    }
+    return allowedPattern, blockedPattern, description, labels
+}
+
+func CreateVirtualRepositoryResponse(
+    registry *types.Registry,
+    upstreamProxyKeys []string,
+    cleanupPolicies *[]types.CleanupPolicy,
+    rootIdentifier string,
+    registryURL string,
+) *api.RegistryResponseJSONResponse {
+    createdAt := GetTimeInMs(registry.CreatedAt)
+    modifiedAt := GetTimeInMs(registry.UpdatedAt)
+    allowedPattern := registry.AllowedPattern
+    blockedPattern := registry.BlockedPattern
+    labels := registry.Labels
+
+    config := api.RegistryConfig{}
+    _ = config.FromVirtualConfig(api.VirtualConfig{UpstreamProxies: &upstreamProxyKeys})
+    response := &api.RegistryResponseJSONResponse{
+        Data: api.Registry{
+            Identifier: registry.Name,
+            Description: &registry.Description,
+            Url: GetRepoURL(rootIdentifier, registry.Name, registryURL),
+            PackageType: registry.PackageType,
+            AllowedPattern: &allowedPattern,
+            BlockedPattern: &blockedPattern,
+            CreatedAt: &createdAt,
+            ModifiedAt: &modifiedAt,
+            CleanupPolicy: CreateCleanupPolicyResponse(cleanupPolicies),
+            Config: &config,
+            Labels: &labels,
+        },
+        Status: api.StatusSUCCESS,
+    }
+    return response
+}
+
+func CreateUpstreamProxyResponseJSONResponse(upstreamproxy *types.UpstreamProxy) *api.RegistryResponseJSONResponse {
+    createdAt := GetTimeInMs(upstreamproxy.CreatedAt)
+    modifiedAt := GetTimeInMs(upstreamproxy.UpdatedAt)
+    allowedPattern := upstreamproxy.AllowedPattern
+    blockedPattern := upstreamproxy.BlockedPattern
+    configAuth := &api.UpstreamConfig_Auth{}
+
+    if api.AuthType(upstreamproxy.RepoAuthType) == api.AuthTypeUserPassword {
+        auth := api.UserPassword{}
+        auth.UserName = upstreamproxy.UserName
+        // FIXME: Mask this password.
+        auth.SecretIdentifier = &upstreamproxy.SecretIdentifier
+        auth.SecretSpaceId = &upstreamproxy.SecretSpaceID
+        _ = configAuth.FromUserPassword(auth)
+    }
+
+    source := api.UpstreamConfigSource(upstreamproxy.Source)
+
+    config := api.UpstreamConfig{
+        AuthType: api.AuthType(upstreamproxy.RepoAuthType),
+        Auth: configAuth,
+        Source: &source,
+        Url: &upstreamproxy.RepoURL,
+    }
+    registryConfig := &api.RegistryConfig{}
+    _ = registryConfig.FromUpstreamConfig(config)
+
+    response := &api.RegistryResponseJSONResponse{
+        Data: api.Registry{
+            Identifier: upstreamproxy.RepoKey,
+            PackageType: upstreamproxy.PackageType,
+            Url: upstreamproxy.RepoURL,
+            AllowedPattern: &allowedPattern,
+            BlockedPattern: &blockedPattern,
+            CreatedAt: &createdAt,
+            ModifiedAt: &modifiedAt,
+            Config: registryConfig,
+        },
+        Status: api.StatusSUCCESS,
+    }
+    return response
+}
diff --git a/registry/app/api/controller/metadata/cleanuppolicy_mapper.go b/registry/app/api/controller/metadata/cleanuppolicy_mapper.go
new file mode 100644
index 000000000..cca723f5e
--- /dev/null
+++ b/registry/app/api/controller/metadata/cleanuppolicy_mapper.go
@@ -0,0 +1,81 @@
+// Copyright 2023 Harness, Inc.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
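+
+// NOTE (review sketch, not part of the change): ExpiryTime round-trips through
+// the store in milliseconds, as getCleanupPolicyEntity/getCleanupPolicyDto
+// below encode it. For a hypothetical 30-day policy:
+//
+//	d := 30 * 24 * time.Hour                  // 30 days as a Duration
+//	ms := d.Milliseconds()                    // 2592000000, the persisted value
+//	days := int((time.Duration(ms) * time.Millisecond).Hours() / 24) // 30 again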
+
+package metadata
+
+import (
+    "time"
+
+    "github.com/harness/gitness/registry/app/api/openapi/contracts/artifact"
+    "github.com/harness/gitness/registry/types"
+)
+
+func CreateCleanupPolicyEntity(
+    config *artifact.ModifyRegistryJSONRequestBody,
+    repoID int64,
+) *[]types.CleanupPolicy {
+    if config == nil || config.CleanupPolicy == nil {
+        return nil
+    }
+
+    var cleanupPolicyEntities []types.CleanupPolicy
+    cleanupPolicyDto := *config.CleanupPolicy
+
+    for _, value := range cleanupPolicyDto {
+        cleanupPolicyEntity := getCleanupPolicyEntity(value, repoID)
+        cleanupPolicyEntities = append(cleanupPolicyEntities, *cleanupPolicyEntity)
+    }
+    return &cleanupPolicyEntities
+}
+
+func CreateCleanupPolicyResponse(
+    cleanupPolicyEntities *[]types.CleanupPolicy,
+) *[]artifact.CleanupPolicy {
+    // Guard against a nil pointer; registries may have no cleanup policies.
+    if cleanupPolicyEntities == nil {
+        return nil
+    }
+    var cleanupPolicyDtos []artifact.CleanupPolicy
+
+    for _, value := range *cleanupPolicyEntities {
+        cleanupPolicy := getCleanupPolicyDto(value)
+        cleanupPolicyDtos = append(cleanupPolicyDtos, *cleanupPolicy)
+    }
+    return &cleanupPolicyDtos
+}
+
+func getCleanupPolicyEntity(
+    cleanupPolicy artifact.CleanupPolicy,
+    repoID int64,
+) *types.CleanupPolicy {
+    expireTime := time.Duration(*cleanupPolicy.ExpireDays) * 24 * time.Hour
+    return &types.CleanupPolicy{
+        Name: *cleanupPolicy.Name,
+        VersionPrefix: *cleanupPolicy.VersionPrefix,
+        PackagePrefix: *cleanupPolicy.PackagePrefix,
+        ExpiryTime: expireTime.Milliseconds(),
+        RegistryID: repoID,
+    }
+}
+
+func getCleanupPolicyDto(
+    cleanupPolicy types.CleanupPolicy,
+) *artifact.CleanupPolicy {
+    packagePrefix := cleanupPolicy.PackagePrefix
+    versionPrefix := cleanupPolicy.VersionPrefix
+    // ExpiryTime is persisted in milliseconds; convert before computing days.
+    expiryDays := int((time.Duration(cleanupPolicy.ExpiryTime) * time.Millisecond).Hours() / 24)
+
+    return &artifact.CleanupPolicy{
+        Name: &cleanupPolicy.Name,
+        VersionPrefix: &versionPrefix,
+        PackagePrefix: &packagePrefix,
+        ExpireDays: &expiryDays,
+    }
+}
diff --git a/registry/app/api/controller/metadata/controller.go b/registry/app/api/controller/metadata/controller.go
new file mode 100644
index 000000000..d719b4d01
--- /dev/null
+++ b/registry/app/api/controller/metadata/controller.go
@@ -0,0 +1,71 @@
+// Copyright 2023 Harness, Inc.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package metadata
+
+import (
+    "github.com/harness/gitness/app/auth/authz"
+    corestore "github.com/harness/gitness/app/store"
+    urlprovider "github.com/harness/gitness/app/url"
+    "github.com/harness/gitness/audit"
+    storagedriver "github.com/harness/gitness/registry/app/driver"
+    "github.com/harness/gitness/registry/app/store"
+    "github.com/harness/gitness/store/database/dbtx"
+)
+
+// APIController bundles the stores and services that back the registry metadata API.
+type APIController struct {
+    ArtifactStore store.ArtifactRepository
+    RegistryRepository store.RegistryRepository
+    UpstreamProxyStore store.UpstreamProxyConfigRepository
+    TagStore store.TagRepository
+    ManifestStore store.ManifestRepository
+    CleanupPolicyStore store.CleanupPolicyRepository
+    spaceStore corestore.SpaceStore
+    tx dbtx.Transactor
+    StorageDriver storagedriver.StorageDriver
+    URLProvider urlprovider.Provider
+    Authorizer authz.Authorizer
+    AuditService audit.Service
+}
+
+func NewAPIController(
+    repositoryStore store.RegistryRepository,
+    upstreamProxyStore store.UpstreamProxyConfigRepository,
+    tagStore store.TagRepository,
+    manifestStore store.ManifestRepository,
+    cleanupPolicyStore store.CleanupPolicyRepository,
+    artifactStore store.ArtifactRepository,
+    driver storagedriver.StorageDriver,
+    spaceStore corestore.SpaceStore,
+    tx dbtx.Transactor,
+    urlProvider urlprovider.Provider,
+    authorizer authz.Authorizer,
+    auditService audit.Service,
+) *APIController {
+    return &APIController{
+        RegistryRepository: repositoryStore,
+        UpstreamProxyStore: upstreamProxyStore,
+        TagStore: tagStore,
+        ManifestStore: manifestStore,
+        CleanupPolicyStore: cleanupPolicyStore,
+        ArtifactStore: artifactStore,
+        spaceStore: spaceStore,
+        StorageDriver: driver,
+        tx: tx,
+        URLProvider: urlProvider,
+        Authorizer: authorizer,
+        AuditService: auditService,
+    }
+}
diff --git a/registry/app/api/controller/metadata/create_registry.go b/registry/app/api/controller/metadata/create_registry.go
new file mode 100644
index 000000000..677c04cc8
--- /dev/null
+++ b/registry/app/api/controller/metadata/create_registry.go
@@ -0,0 +1,321 @@
+// Copyright 2023 Harness, Inc.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
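+
+// NOTE (review sketch, not part of the change): the handler below accepts two
+// request shapes; JSON field spellings are assumptions from the Go contract
+// fields, and all values are hypothetical:
+//
+//	// VIRTUAL registry, optionally linked to upstream proxies:
+//	{"identifier": "my-reg", "packageType": "DOCKER", "parentRef": "root/space",
+//	 "config": {"type": "VIRTUAL", "upstreamProxies": ["proxy-1"]}}
+//
+//	// UPSTREAM proxy (auth block omitted here):
+//	{"identifier": "proxy-1", "packageType": "DOCKER", "parentRef": "root/space",
+//	 "config": {"type": "UPSTREAM", "url": "https://registry-1.docker.io"}}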
+
+package metadata
+
+import (
+    "context"
+    "fmt"
+    "net/http"
+
+    apiauth "github.com/harness/gitness/app/api/auth"
+    "github.com/harness/gitness/app/api/request"
+    "github.com/harness/gitness/app/auth"
+    "github.com/harness/gitness/audit"
+    "github.com/harness/gitness/registry/app/api/openapi/contracts/artifact"
+    registrytypes "github.com/harness/gitness/registry/types"
+    "github.com/harness/gitness/types"
+    gitnessenum "github.com/harness/gitness/types/enum"
+
+    "github.com/rs/zerolog/log"
+)
+
+func (c *APIController) CreateRegistry(
+    ctx context.Context,
+    r artifact.CreateRegistryRequestObject,
+) (artifact.CreateRegistryResponseObject, error) {
+    registryRequest := artifact.RegistryRequest(*r.Body)
+    parentRef := artifact.SpaceRefPathParam(*registryRequest.ParentRef)
+
+    regInfo, err := c.GetRegistryRequestBaseInfo(ctx, string(parentRef), "")
+    if err != nil {
+        return artifact.CreateRegistry400JSONResponse{
+            BadRequestJSONResponse: artifact.BadRequestJSONResponse(
+                *GetErrorResponse(http.StatusBadRequest, err.Error()),
+            ),
+        }, err
+    }
+
+    space, err := c.spaceStore.FindByRef(ctx, regInfo.parentRef)
+    if err != nil {
+        return artifact.CreateRegistry400JSONResponse{
+            BadRequestJSONResponse: artifact.BadRequestJSONResponse(
+                *GetErrorResponse(http.StatusBadRequest, err.Error()),
+            ),
+        }, err
+    }
+
+    session, _ := request.AuthSessionFrom(ctx)
+    if err = apiauth.CheckSpaceScope(
+        ctx,
+        c.Authorizer,
+        session,
+        space,
+        gitnessenum.ResourceTypeRegistry,
+        gitnessenum.PermissionRegistryEdit,
+    ); err != nil {
+        return artifact.CreateRegistry403JSONResponse{
+            UnauthorizedJSONResponse: artifact.UnauthorizedJSONResponse(
+                *GetErrorResponse(http.StatusForbidden, err.Error()),
+            ),
+        }, err
+    }
+
+    if registryRequest.Config.Type == artifact.RegistryTypeVIRTUAL {
+        return c.createVirtualRegistry(ctx, registryRequest, regInfo, session, parentRef)
+    }
+    registry, upstreamproxy, err := CreateUpstreamProxyEntity(
+        registryRequest,
+        regInfo.parentID, regInfo.rootIdentifierID,
+    )
+    var registryID int64
+    if err != nil {
+        return throwCreateRegistry400Error(err), err
+    }
+
+    err = c.tx.WithTx(
+        ctx, func(ctx context.Context) error {
+            registryID, err = c.createRegistryWithAudit(ctx, registry, session.Principal, string(parentRef))
+
+            if err != nil {
+                return fmt.Errorf("failed to create registry: %w", err)
+            }
+
+            upstreamproxy.RegistryID = registryID
+
+            _, err = c.createUpstreamProxyWithAudit(
+                ctx, upstreamproxy, session.Principal, string(parentRef), registry.Name,
+            )
+
+            if err != nil {
+                return fmt.Errorf("failed to create upstream proxy: %w", err)
+            }
+            return nil
+        },
+    )
+
+    if err != nil {
+        return throwCreateRegistry400Error(err), err
+    }
+    upstreamproxyEntity, err := c.UpstreamProxyStore.Get(ctx, registryID)
+    if err != nil {
+        return throwCreateRegistry400Error(err), err
+    }
+
+    return artifact.CreateRegistry201JSONResponse{
+        RegistryResponseJSONResponse: *CreateUpstreamProxyResponseJSONResponse(upstreamproxyEntity),
+    }, nil
+}
+
+func (c *APIController) createVirtualRegistry(
+    ctx context.Context, registryRequest artifact.RegistryRequest, regInfo *RegistryRequestBaseInfo,
+    session *auth.Session, parentRef artifact.SpaceRefPathParam,
+) (artifact.CreateRegistryResponseObject, error) {
+    registry, err := CreateRegistryEntity(registryRequest, regInfo.parentID, regInfo.rootIdentifierID)
+    if err != nil {
+        return throwCreateRegistry400Error(err), nil
+    }
+    err = c.setUpstreamProxyIDs(ctx, registry, registryRequest, regInfo.parentID)
+    if err != nil {
+        return throwCreateRegistry400Error(err), nil
+    }
+    id, err := c.createRegistryWithAudit(ctx, registry, session.Principal, string(parentRef))
+    if err != nil {
+        return throwCreateRegistry400Error(err), nil
+    }
+    repoEntity, err := c.RegistryRepository.Get(ctx, id)
+    if err != nil {
+        return throwCreateRegistry400Error(err), nil
+    }
+    cleanupPolicies, err := c.CleanupPolicyStore.GetByRegistryID(ctx, repoEntity.ID)
+    if err != nil {
+        return throwCreateRegistry400Error(err), nil
+    }
+
+    return artifact.CreateRegistry201JSONResponse{
+        RegistryResponseJSONResponse: *CreateVirtualRepositoryResponse(
+            repoEntity, c.getUpstreamProxyKeys(ctx, repoEntity.UpstreamProxies),
+            cleanupPolicies, regInfo.rootIdentifier, c.URLProvider.RegistryURL(),
+        ),
+    }, nil
+}
+
+func (c *APIController) createUpstreamProxyWithAudit(
+    ctx context.Context,
+    upstreamProxy *registrytypes.UpstreamProxyConfig, principal types.Principal,
+    parentRef string, registryName string,
+) (int64, error) {
+    id, err := c.UpstreamProxyStore.Create(ctx, upstreamProxy)
+    if err != nil {
+        return id, err
+    }
+    auditErr := c.AuditService.Log(
+        ctx,
+        principal,
+        audit.NewResource(audit.ResourceTypeRegistryUpstreamProxy, registryName),
+        audit.ActionCreated,
+        parentRef,
+        audit.WithNewObject(
+            audit.RegistryUpstreamProxyConfigObject{
+                ID: id,
+                RegistryID: upstreamProxy.RegistryID,
+                Source: upstreamProxy.Source,
+                URL: upstreamProxy.URL,
+                AuthType: upstreamProxy.AuthType,
+                CreatedAt: upstreamProxy.CreatedAt,
+                UpdatedAt: upstreamProxy.UpdatedAt,
+                CreatedBy: upstreamProxy.CreatedBy,
+                UpdatedBy: upstreamProxy.UpdatedBy,
+            },
+        ),
+    )
+    if auditErr != nil {
+        log.Ctx(ctx).Warn().Msgf(
+            "failed to insert audit log for create upstream proxy config operation: %s", auditErr,
+        )
+    }
+
+    return id, err
+}
+
+func (c *APIController) createRegistryWithAudit(
+    ctx context.Context, registry *registrytypes.Registry,
+    principal types.Principal, parentRef string,
+) (int64, error) {
+    id, err := c.RegistryRepository.Create(ctx, registry)
+    if err != nil {
+        return id, err
+    }
+    auditErr := c.AuditService.Log(
+        ctx,
+        principal,
+        audit.NewResource(audit.ResourceTypeRegistry, registry.Name),
+        audit.ActionCreated,
+        parentRef,
+        audit.WithNewObject(
+            audit.RegistryObject{
+                Registry: *registry,
+            },
+        ),
+    )
+    if auditErr != nil {
+        log.Ctx(ctx).Warn().Msgf("failed to insert audit log for create registry operation: %s", auditErr)
+    }
+    return id, err
+}
+
+func throwCreateRegistry400Error(err error) artifact.CreateRegistry400JSONResponse {
+    return artifact.CreateRegistry400JSONResponse{
+        BadRequestJSONResponse: artifact.BadRequestJSONResponse(
+            *GetErrorResponse(http.StatusBadRequest, err.Error()),
+        ),
+    }
+}
+
+func CreateRegistryEntity(
+    dto artifact.RegistryRequest, parentID int64,
+    rootParentID int64,
+) (*registrytypes.Registry, error) {
+    allowedPattern, blockedPattern, description, labels := getRepoEntityFields(dto)
+    e := ValidatePackageType(string(dto.PackageType))
+    if e != nil {
+        return nil, e
+    }
+    e = ValidateRepoType(string(dto.Config.Type))
+    if e != nil {
+        return nil, e
+    }
+    e = ValidateIdentifier(dto.Identifier)
+    if e != nil {
+        return nil, e
+    }
+    entity := &registrytypes.Registry{
+        Name: dto.Identifier,
+        ParentID: parentID,
+        RootParentID: rootParentID,
+        Description: description,
+        AllowedPattern: allowedPattern,
+        BlockedPattern: blockedPattern,
+        PackageType: dto.PackageType,
+        Labels: labels,
+        Type: dto.Config.Type,
+    }
+    return entity, nil
+}
+
+func CreateUpstreamProxyEntity(
+    dto artifact.RegistryRequest,
+    parentID int64,
+    rootParentID int64,
+) (*registrytypes.Registry, *registrytypes.UpstreamProxyConfig, error) {
+    allowedPattern := []string{}
+    if dto.AllowedPattern != nil {
+        allowedPattern = *dto.AllowedPattern
+    }
+    blockedPattern := []string{}
+    if dto.BlockedPattern != nil {
+        blockedPattern = *dto.BlockedPattern
+    }
+    e := ValidatePackageType(string(dto.PackageType))
+    if e != nil {
+        return nil, nil, e
+    }
+    e = ValidateUpstream(dto.Config)
+    if e != nil {
+        return nil, nil, e
+    }
+    e = ValidateIdentifier(dto.Identifier)
+    if e != nil {
+        return nil, nil, e
+    }
+    repoEntity := &registrytypes.Registry{
+        Name: dto.Identifier,
+        ParentID: parentID,
+        RootParentID: rootParentID,
+        AllowedPattern: allowedPattern,
+        BlockedPattern: blockedPattern,
+        PackageType: dto.PackageType,
+        Type: artifact.RegistryTypeUPSTREAM,
+    }
+
+    config, e := dto.Config.AsUpstreamConfig()
+    if e != nil {
+        return nil, nil, e
+    }
+    CleanURLPath(config.Url)
+    upstreamProxyConfigEntity := &registrytypes.UpstreamProxyConfig{
+        URL: *config.Url,
+        AuthType: string(config.AuthType),
+    }
+    if config.Source != nil && len(string(*config.Source)) > 0 {
+        err := ValidateUpstreamSource(string(*config.Source))
+        if err != nil {
+            return nil, nil, err
+        }
+        upstreamProxyConfigEntity.Source = string(*config.Source)
+    }
+    if config.AuthType == artifact.AuthTypeUserPassword {
+        res, err := config.Auth.AsUserPassword()
+        if err != nil {
+            return nil, nil, err
+        }
+        upstreamProxyConfigEntity.UserName = res.UserName
+        if res.SecretIdentifier == nil || res.SecretSpaceId == nil {
+            return nil, nil, fmt.Errorf("failed to create upstream proxy: secret_identifier or secret_space_id missing")
+        }
+        upstreamProxyConfigEntity.SecretIdentifier = *res.SecretIdentifier
+        upstreamProxyConfigEntity.SecretSpaceID = *res.SecretSpaceId
+    }
+    return repoEntity, upstreamProxyConfigEntity, nil
+}
diff --git a/registry/app/api/controller/metadata/delete_registry.go b/registry/app/api/controller/metadata/delete_registry.go
new file mode 100644
index 000000000..07bb6885c
--- /dev/null
+++ b/registry/app/api/controller/metadata/delete_registry.go
@@ -0,0 +1,170 @@
+// Copyright 2023 Harness, Inc.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
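+
+// NOTE (review sketch, not part of the change): for upstream registries the
+// handler below deletes the proxy config row before the registry row it
+// references, inside a single transaction, roughly:
+//
+//	err = c.tx.WithTx(ctx, func(ctx context.Context) error {
+//		if err := c.deleteUpstreamProxyWithAudit(ctx, regInfo, principal, parentRef, name); err != nil {
+//			return err
+//		}
+//		return c.deleteRegistryWithAudit(ctx, regInfo, repoEntity, principal, parentRef)
+//	})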
+
+package metadata
+
+import (
+    "context"
+    "fmt"
+    "net/http"
+
+    apiauth "github.com/harness/gitness/app/api/auth"
+    "github.com/harness/gitness/app/api/request"
+    "github.com/harness/gitness/audit"
+    "github.com/harness/gitness/registry/app/api/openapi/contracts/artifact"
+    registrytypes "github.com/harness/gitness/registry/types"
+    "github.com/harness/gitness/types"
+    "github.com/harness/gitness/types/enum"
+
+    "github.com/rs/zerolog/log"
+)
+
+func (c *APIController) DeleteRegistry(
+    ctx context.Context,
+    r artifact.DeleteRegistryRequestObject,
+) (artifact.DeleteRegistryResponseObject, error) {
+    regInfo, err := c.GetRegistryRequestBaseInfo(ctx, "", string(r.RegistryRef))
+    if err != nil {
+        return artifact.DeleteRegistry400JSONResponse{
+            BadRequestJSONResponse: artifact.BadRequestJSONResponse(
+                *GetErrorResponse(http.StatusBadRequest, err.Error()),
+            ),
+        }, err
+    }
+    space, err := c.spaceStore.FindByRef(ctx, regInfo.parentRef)
+    if err != nil {
+        return artifact.DeleteRegistry400JSONResponse{
+            BadRequestJSONResponse: artifact.BadRequestJSONResponse(
+                *GetErrorResponse(http.StatusBadRequest, err.Error()),
+            ),
+        }, err
+    }
+
+    session, _ := request.AuthSessionFrom(ctx)
+    permissionChecks := getPermissionChecks(space, regInfo.RegistryIdentifier, enum.PermissionRegistryDelete)
+    if err = apiauth.CheckRegistry(
+        ctx,
+        c.Authorizer,
+        session,
+        permissionChecks...,
+    ); err != nil {
+        return artifact.DeleteRegistry403JSONResponse{
+            UnauthorizedJSONResponse: artifact.UnauthorizedJSONResponse(
+                *GetErrorResponse(http.StatusForbidden, err.Error()),
+            ),
+        }, err
+    }
+
+    repoEntity, err := c.RegistryRepository.GetByParentIDAndName(ctx, regInfo.parentID, regInfo.RegistryIdentifier)
+    // Check the lookup error before dereferencing repoEntity.
+    if err != nil {
+        return throwDeleteRegistry500Error(err), err
+    }
+    if len(repoEntity.Name) == 0 {
+        return artifact.DeleteRegistry404JSONResponse{
+            NotFoundJSONResponse: artifact.NotFoundJSONResponse(
+                *GetErrorResponse(http.StatusNotFound, "registry doesn't exist with this key"),
+            ),
+        }, nil
+    }
+
+    if string(repoEntity.Type) == string(artifact.RegistryTypeVIRTUAL) {
+        err = c.deleteRegistryWithAudit(ctx, regInfo, repoEntity, session.Principal, regInfo.parentRef)
+    } else {
+        err = c.tx.WithTx(
+            ctx, func(ctx context.Context) error {
+                err = c.deleteUpstreamProxyWithAudit(
+                    ctx, regInfo, session.Principal, regInfo.parentRef, repoEntity.Name,
+                )
+
+                if err != nil {
+                    return fmt.Errorf("failed to delete upstream proxy: %w", err)
+                }
+
+                err = c.deleteRegistryWithAudit(ctx, regInfo, repoEntity, session.Principal, regInfo.parentRef)
+
+                if err != nil {
+                    return fmt.Errorf("failed to delete registry: %w", err)
+                }
+
+                return nil
+            },
+        )
+    }
+    if err != nil {
+        return throwDeleteRegistry500Error(err), err
+    }
+    return artifact.DeleteRegistry200JSONResponse{
+        SuccessJSONResponse: artifact.SuccessJSONResponse(*GetSuccessResponse()),
+    }, nil
+}
+
+func (c *APIController) deleteUpstreamProxyWithAudit(
+    ctx context.Context,
+    regInfo *RegistryRequestBaseInfo, principal types.Principal, parentRef string, registryName string,
+) error {
+    err := c.UpstreamProxyStore.Delete(ctx, regInfo.parentID, regInfo.RegistryIdentifier)
+    if err != nil {
+        return err
+    }
+
+    auditErr := c.AuditService.Log(
+        ctx,
+        principal,
+        audit.NewResource(audit.ResourceTypeRegistryUpstreamProxy, registryName),
+        audit.ActionDeleted,
+        parentRef,
+        audit.WithData("registry name", registryName),
+    )
+    if auditErr != nil {
+        log.Ctx(ctx).Warn().Msgf(
+            "failed to insert audit log for delete upstream proxy config operation: %s", auditErr,
+        )
+    }
+
+    return err
+}
+
+func (c *APIController) deleteRegistryWithAudit(
+    ctx context.Context, regInfo *RegistryRequestBaseInfo,
+    registry *registrytypes.Registry, principal types.Principal, parentRef string,
+) error {
+    err := c.RegistryRepository.Delete(ctx, regInfo.parentID, regInfo.RegistryIdentifier)
+    if err != nil {
+        return err
+    }
+    auditErr := c.AuditService.Log(
+        ctx,
+        principal,
+        audit.NewResource(audit.ResourceTypeRegistry, registry.Name),
+        audit.ActionDeleted,
+        parentRef,
+        audit.WithOldObject(
+            audit.RegistryObject{
+                Registry: *registry,
+            },
+        ),
+    )
+    if auditErr != nil {
+        log.Ctx(ctx).Warn().Msgf("failed to insert audit log for delete registry operation: %s", auditErr)
+    }
+    return err
+}
+
+func throwDeleteRegistry500Error(err error) artifact.DeleteRegistry500JSONResponse {
+    return artifact.DeleteRegistry500JSONResponse{
+        InternalServerErrorJSONResponse: artifact.InternalServerErrorJSONResponse(
+            *GetErrorResponse(http.StatusInternalServerError, err.Error()),
+        ),
+    }
+}
diff --git a/registry/app/api/controller/metadata/get_artifact_stats.go b/registry/app/api/controller/metadata/get_artifact_stats.go
new file mode 100644
index 000000000..654a8f887
--- /dev/null
+++ b/registry/app/api/controller/metadata/get_artifact_stats.go
@@ -0,0 +1,112 @@
+// Copyright 2023 Harness, Inc.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
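+
+// NOTE (review sketch, not part of the change): every read endpoint in the
+// files below applies the same guard before touching any store:
+//
+//	session, _ := request.AuthSessionFrom(ctx)
+//	checks := getPermissionChecks(space, regInfo.RegistryIdentifier, enum.PermissionRegistryView)
+//	if err = apiauth.CheckRegistry(ctx, c.Authorizer, session, checks...); err != nil {
+//		return ...403JSONResponse{...}, nil // 403 payload carrying the error message
+//	}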
+
+package metadata
+
+import (
+    "context"
+    "net/http"
+
+    apiauth "github.com/harness/gitness/app/api/auth"
+    "github.com/harness/gitness/app/api/request"
+    "github.com/harness/gitness/registry/app/api/openapi/contracts/artifact"
+    "github.com/harness/gitness/types/enum"
+)
+
+func (c *APIController) GetArtifactStats(
+    _ context.Context,
+    _ artifact.GetArtifactStatsRequestObject,
+) (artifact.GetArtifactStatsResponseObject, error) {
+    return nil, nil
+}
+
+func (c *APIController) GetArtifactStatsForSpace(
+    ctx context.Context,
+    r artifact.GetArtifactStatsForSpaceRequestObject,
+) (artifact.GetArtifactStatsForSpaceResponseObject, error) {
+    parentRef := r.SpaceRef
+    regInfo, err := c.GetRegistryRequestBaseInfo(ctx, string(parentRef), "")
+    if err != nil {
+        return artifact.GetArtifactStatsForSpace400JSONResponse{
+            BadRequestJSONResponse: artifact.BadRequestJSONResponse(
+                *GetErrorResponse(http.StatusBadRequest, err.Error()),
+            ),
+        }, nil
+    }
+
+    space, err := c.spaceStore.FindByRef(ctx, regInfo.parentRef)
+    if err != nil {
+        return artifact.GetArtifactStatsForSpace400JSONResponse{
+            BadRequestJSONResponse: artifact.BadRequestJSONResponse(
+                *GetErrorResponse(http.StatusBadRequest, err.Error()),
+            ),
+        }, nil
+    }
+
+    session, _ := request.AuthSessionFrom(ctx)
+    permissionChecks := getPermissionChecks(space, regInfo.RegistryIdentifier, enum.PermissionRegistryView)
+    if err = apiauth.CheckRegistry(
+        ctx,
+        c.Authorizer,
+        session,
+        permissionChecks...,
+    ); err != nil {
+        return artifact.GetArtifactStatsForSpace403JSONResponse{
+            UnauthorizedJSONResponse: artifact.UnauthorizedJSONResponse(
+                *GetErrorResponse(http.StatusForbidden, err.Error()),
+            ),
+        }, nil
+    }
+    return nil, nil
+}
+
+func (c *APIController) GetArtifactStatsForRegistry(
+    ctx context.Context,
+    r artifact.GetArtifactStatsForRegistryRequestObject,
+) (artifact.GetArtifactStatsForRegistryResponseObject, error) {
+    regInfo, err := c.GetRegistryRequestBaseInfo(ctx, "", string(r.RegistryRef))
+    if err != nil {
+        return artifact.GetArtifactStatsForRegistry400JSONResponse{
+            BadRequestJSONResponse: artifact.BadRequestJSONResponse(
+                *GetErrorResponse(http.StatusBadRequest, err.Error()),
+            ),
+        }, nil
+    }
+
+    space, err := c.spaceStore.FindByRef(ctx, regInfo.parentRef)
+    if err != nil {
+        return artifact.GetArtifactStatsForRegistry400JSONResponse{
+            BadRequestJSONResponse: artifact.BadRequestJSONResponse(
+                *GetErrorResponse(http.StatusBadRequest, err.Error()),
+            ),
+        }, nil
+    }
+
+    session, _ := request.AuthSessionFrom(ctx)
+    if err = apiauth.CheckSpaceScope(
+        ctx,
+        c.Authorizer,
+        session,
+        space,
+        enum.ResourceTypeRegistry,
+        enum.PermissionRegistryView,
+    ); err != nil {
+        return artifact.GetArtifactStatsForRegistry403JSONResponse{
+            UnauthorizedJSONResponse: artifact.UnauthorizedJSONResponse(
+                *GetErrorResponse(http.StatusForbidden, err.Error()),
+            ),
+        }, nil
+    }
+    return nil, nil
+}
diff --git a/registry/app/api/controller/metadata/get_artifacts.go b/registry/app/api/controller/metadata/get_artifacts.go
new file mode 100644
index 000000000..8c11ec8b4
--- /dev/null
+++ b/registry/app/api/controller/metadata/get_artifacts.go
@@ -0,0 +1,113 @@
+// Copyright 2023 Harness, Inc.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package metadata
+
+import (
+    "context"
+    "net/http"
+
+    apiauth "github.com/harness/gitness/app/api/auth"
+    "github.com/harness/gitness/app/api/request"
+    "github.com/harness/gitness/registry/app/api/openapi/contracts/artifact"
+    "github.com/harness/gitness/registry/types"
+    "github.com/harness/gitness/types/enum"
+)
+
+func (c *APIController) GetAllArtifacts(
+    ctx context.Context,
+    r artifact.GetAllArtifactsRequestObject,
+) (artifact.GetAllArtifactsResponseObject, error) {
+    ref := ""
+    if r.Params.RegIdentifier != nil {
+        ref2, err2 := GetRegRef(string(r.SpaceRef), string(*r.Params.RegIdentifier))
+        if err2 != nil {
+            return c.getAllArtifacts400JSONResponse(err2)
+        }
+        ref = ref2
+    }
+
+    regInfo, err := c.GetRegistryRequestInfo(
+        ctx, r.Params.PackageType, r.Params.Page, r.Params.Size,
+        r.Params.SearchTerm, ArtifactResource, string(r.SpaceRef), ref, r.Params.Label,
+        r.Params.SortOrder, r.Params.SortField,
+    )
+    if err != nil {
+        return c.getAllArtifacts400JSONResponse(err)
+    }
+
+    space, err := c.spaceStore.FindByRef(ctx, regInfo.parentRef)
+    if err != nil {
+        return artifact.GetAllArtifacts400JSONResponse{
+            BadRequestJSONResponse: artifact.BadRequestJSONResponse(
+                *GetErrorResponse(http.StatusBadRequest, err.Error()),
+            ),
+        }, nil
+    }
+
+    session, _ := request.AuthSessionFrom(ctx)
+    permissionChecks := getPermissionChecks(space, regInfo.RegistryIdentifier, enum.PermissionRegistryView)
+    if err = apiauth.CheckRegistry(
+        ctx,
+        c.Authorizer,
+        session,
+        permissionChecks...,
+    ); err != nil {
+        return artifact.GetAllArtifacts403JSONResponse{
+            UnauthorizedJSONResponse: artifact.UnauthorizedJSONResponse(
+                *GetErrorResponse(http.StatusForbidden, err.Error()),
+            ),
+        }, nil
+    }
+
+    var artifacts *[]types.ArtifactMetadata
+    var count int64
+    if len(regInfo.RegistryIdentifier) == 0 {
+        artifacts, err = c.TagStore.GetAllArtifactsByParentID(
+            ctx, regInfo.parentID, &regInfo.packageTypes,
+            regInfo.sortByField, regInfo.sortByOrder, regInfo.limit, regInfo.offset, regInfo.searchTerm, regInfo.labels,
+        )
+        count, _ = c.TagStore.CountAllArtifactsByParentID(
+            ctx, regInfo.parentID, &regInfo.packageTypes,
+            regInfo.searchTerm, regInfo.labels,
+        )
+    } else {
+        artifacts, err = c.TagStore.GetAllArtifactsByRepo(
+            ctx, regInfo.parentID, regInfo.RegistryIdentifier,
+            regInfo.sortByField, regInfo.sortByOrder, regInfo.limit, regInfo.offset, regInfo.searchTerm, regInfo.labels,
+        )
+        count, _ = c.TagStore.CountAllArtifactsByRepo(
+            ctx, regInfo.parentID, regInfo.RegistryIdentifier,
+            regInfo.searchTerm, regInfo.labels,
+        )
+    }
+    if err != nil {
+        return artifact.GetAllArtifacts500JSONResponse{
+            InternalServerErrorJSONResponse: artifact.InternalServerErrorJSONResponse(
+                *GetErrorResponse(http.StatusInternalServerError, err.Error()),
+            ),
+        }, nil
+    }
+    return artifact.GetAllArtifacts200JSONResponse{
+        ListArtifactResponseJSONResponse: *GetAllArtifactResponse(artifacts, count, regInfo.pageNumber, regInfo.limit),
+    }, nil
+}
+
+func (c *APIController) getAllArtifacts400JSONResponse(err error) (artifact.GetAllArtifactsResponseObject, error) {
+    return artifact.GetAllArtifacts400JSONResponse{
+        BadRequestJSONResponse: artifact.BadRequestJSONResponse(
+            *GetErrorResponse(http.StatusBadRequest, err.Error()),
+        ),
+    }, nil
+}
diff --git a/registry/app/api/controller/metadata/get_artifacts_docker_details.go b/registry/app/api/controller/metadata/get_artifacts_docker_details.go
new file mode 100644
index 000000000..da5289f0d
--- /dev/null
+++ b/registry/app/api/controller/metadata/get_artifacts_docker_details.go
@@ -0,0 +1,120 @@
+// Copyright 2023 Harness, Inc.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package metadata
+
+import (
+    "context"
+    "errors"
+    "fmt"
+    "net/http"
+
+    apiauth "github.com/harness/gitness/app/api/auth"
+    "github.com/harness/gitness/app/api/request"
+    "github.com/harness/gitness/registry/app/api/openapi/contracts/artifact"
+    "github.com/harness/gitness/registry/types"
+    store2 "github.com/harness/gitness/store"
+    "github.com/harness/gitness/types/enum"
+
+    "github.com/opencontainers/go-digest"
+)
+
+func (c *APIController) GetDockerArtifactDetails(
+    ctx context.Context,
+    r artifact.GetDockerArtifactDetailsRequestObject,
+) (artifact.GetDockerArtifactDetailsResponseObject, error) {
+    regInfo, err := c.GetRegistryRequestBaseInfo(ctx, "", string(r.RegistryRef))
+    if err != nil {
+        return artifact.GetDockerArtifactDetails400JSONResponse{
+            BadRequestJSONResponse: artifact.BadRequestJSONResponse(
+                *GetErrorResponse(http.StatusBadRequest, err.Error()),
+            ),
+        }, nil
+    }
+
+    space, err := c.spaceStore.FindByRef(ctx, regInfo.parentRef)
+    if err != nil {
+        return artifact.GetDockerArtifactDetails400JSONResponse{
+            BadRequestJSONResponse: artifact.BadRequestJSONResponse(
+                *GetErrorResponse(http.StatusBadRequest, err.Error()),
+            ),
+        }, nil
+    }
+
+    session, _ := request.AuthSessionFrom(ctx)
+    permissionChecks := getPermissionChecks(space, regInfo.RegistryIdentifier, enum.PermissionRegistryView)
+    if err = apiauth.CheckRegistry(
+        ctx,
+        c.Authorizer,
+        session,
+        permissionChecks...,
+    ); err != nil {
+        return artifact.GetDockerArtifactDetails403JSONResponse{
+            UnauthorizedJSONResponse: artifact.UnauthorizedJSONResponse(
+                *GetErrorResponse(http.StatusForbidden, err.Error()),
+            ),
+        }, nil
+    }
+
+    image := string(r.Artifact)
+    version := string(r.Version)
+    manifestDigest := string(r.Params.Digest)
+
+    registry, err := c.RegistryRepository.GetByParentIDAndName(ctx, regInfo.parentID, regInfo.RegistryIdentifier)
+
+    if err != nil {
+        return artifact.GetDockerArtifactDetails500JSONResponse{
+            InternalServerErrorJSONResponse: artifact.InternalServerErrorJSONResponse(
+                *GetErrorResponse(http.StatusInternalServerError, err.Error()),
+            ),
+        }, nil
+    }
+
+    tag, err := c.TagStore.GetTagDetail(ctx, registry.ID, image, version)
+    if err != nil {
+        return getArtifactDetailsErrResponse(err)
+    }
+    dgst, err := types.NewDigest(digest.Digest(manifestDigest))
+    if err != nil {
+        return getArtifactDetailsErrResponse(err)
+    }
+    m, err := c.ManifestStore.FindManifestByDigest(ctx, registry.ID, image, dgst)
+
+    if err != nil {
if errors.Is(err, store2.ErrResourceNotFound) {
+ return getArtifactDetailsErrResponse(fmt.Errorf("manifest not found"))
+ }
+ return getArtifactDetailsErrResponse(err)
+ }
+
+ latestTag, err := c.TagStore.GetLatestTag(ctx, registry.ID, image)
+ if err != nil {
+ return getArtifactDetailsErrResponse(err)
+ }
+
+ return artifact.GetDockerArtifactDetails200JSONResponse{
+ DockerArtifactDetailResponseJSONResponse: *GetDockerArtifactDetails(
+ registry, tag, m,
+ latestTag.ID == tag.ID, regInfo, c.URLProvider.RegistryURL(),
+ ),
+ }, nil
+}
+
+func getArtifactDetailsErrResponse(err error) (artifact.GetDockerArtifactDetailsResponseObject, error) {
+ return artifact.GetDockerArtifactDetails500JSONResponse{
+ InternalServerErrorJSONResponse: artifact.InternalServerErrorJSONResponse(
+ *GetErrorResponse(http.StatusInternalServerError, err.Error()),
+ ),
+ }, nil
+}
diff --git a/registry/app/api/controller/metadata/get_artifacts_docker_layers.go b/registry/app/api/controller/metadata/get_artifacts_docker_layers.go
new file mode 100644
index 000000000..f022ab4a5
--- /dev/null
+++ b/registry/app/api/controller/metadata/get_artifacts_docker_layers.go
@@ -0,0 +1,125 @@
+// Copyright 2023 Harness, Inc.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package metadata
+
+import (
+ "context"
+ "errors"
+ "fmt"
+ "net/http"
+
+ apiauth "github.com/harness/gitness/app/api/auth"
+ "github.com/harness/gitness/app/api/request"
+ "github.com/harness/gitness/registry/app/api/openapi/contracts/artifact"
+ "github.com/harness/gitness/registry/types"
+ store2 "github.com/harness/gitness/store"
+ "github.com/harness/gitness/types/enum"
+
+ "github.com/opencontainers/go-digest"
+)
+
+func (c *APIController) GetDockerArtifactLayers(
+ ctx context.Context,
+ r artifact.GetDockerArtifactLayersRequestObject,
+) (artifact.GetDockerArtifactLayersResponseObject, error) {
+ // Report a bad registry ref as a 400 instead of dereferencing a nil regInfo below.
+ regInfo, err := c.GetRegistryRequestBaseInfo(ctx, "", string(r.RegistryRef))
+ if err != nil {
+ return artifact.GetDockerArtifactLayers400JSONResponse{
+ BadRequestJSONResponse: artifact.BadRequestJSONResponse(
+ *GetErrorResponse(http.StatusBadRequest, err.Error()),
+ ),
+ }, nil
+ }
+
+ space, err := c.spaceStore.FindByRef(ctx, regInfo.parentRef)
+ if err != nil {
+ return artifact.GetDockerArtifactLayers400JSONResponse{
+ BadRequestJSONResponse: artifact.BadRequestJSONResponse(
+ *GetErrorResponse(http.StatusBadRequest, err.Error()),
+ ),
+ }, nil
+ }
+
+ session, _ := request.AuthSessionFrom(ctx)
+ permissionChecks := getPermissionChecks(space, regInfo.RegistryIdentifier, enum.PermissionRegistryView)
+ if err = apiauth.CheckRegistry(
+ ctx,
+ c.Authorizer,
+ session,
+ permissionChecks...,
+ ); err != nil {
+ return artifact.GetDockerArtifactLayers403JSONResponse{
+ UnauthorizedJSONResponse: artifact.UnauthorizedJSONResponse(
+ *GetErrorResponse(http.StatusForbidden, err.Error()),
+ ),
+ }, nil
+ }
+
+ manifestDigest := string(r.Params.Digest)
+ image := string(r.Artifact)
+
+ dgst, err := types.NewDigest(digest.Digest(manifestDigest))
+ if err != nil {
+ return getLayersErrorResponse(err)
+ }
+ registry, err := c.RegistryRepository.GetByParentIDAndName(ctx, regInfo.parentID, regInfo.RegistryIdentifier)
+ if err != nil {
+ return 
getLayersErrorResponse(err) + } + if registry == nil { + return getLayersErrorResponse(fmt.Errorf("repository not found")) + } + + m, err := c.ManifestStore.FindManifestByDigest(ctx, registry.ID, image, dgst) + if err != nil { + if errors.Is(err, store2.ErrResourceNotFound) { + return getLayersErrorResponse(fmt.Errorf("manifest not found")) + } + return getLayersErrorResponse(err) + } + + mConfig, err := getManifestConfig(ctx, m.Configuration.Digest, regInfo.rootIdentifier, c.StorageDriver) + if err != nil { + return getLayersErrorResponse(err) + } + + layersSummary := &artifact.DockerLayersSummary{ + Digest: m.Digest.String(), + } + + if mConfig != nil { + osArch := fmt.Sprintf("%s/%s", mConfig.Os, mConfig.Arch) + layersSummary.OsArch = &osArch + var historyLayers []artifact.DockerLayerEntry + for _, history := range mConfig.History { + historyLayers = append( + historyLayers, artifact.DockerLayerEntry{ + Command: history.CreatedBy, + }, + ) + } + layersSummary.Layers = &historyLayers + } else { + return getLayersErrorResponse(fmt.Errorf("manifest config not found")) + } + + return artifact.GetDockerArtifactLayers200JSONResponse{ + DockerLayersResponseJSONResponse: artifact.DockerLayersResponseJSONResponse{ + Data: *layersSummary, + Status: artifact.StatusSUCCESS, + }, + }, nil +} + +func getLayersErrorResponse(err error) (artifact.GetDockerArtifactLayersResponseObject, error) { + return artifact.GetDockerArtifactLayers500JSONResponse{ + InternalServerErrorJSONResponse: artifact.InternalServerErrorJSONResponse( + *GetErrorResponse(http.StatusInternalServerError, err.Error()), + ), + }, nil +} diff --git a/registry/app/api/controller/metadata/get_artifacts_docker_manifest.go b/registry/app/api/controller/metadata/get_artifacts_docker_manifest.go new file mode 100644 index 000000000..d3a41de05 --- /dev/null +++ b/registry/app/api/controller/metadata/get_artifacts_docker_manifest.go @@ -0,0 +1,103 @@ +// Copyright 2023 Harness, Inc. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
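+// GetDockerArtifactManifest (below) resolves the registry ref, enforces the
+// registry-view permission, and returns the stored manifest payload for the
+// requested image digest as a raw JSON string.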
+ +package metadata + +import ( + "context" + "net/http" + + apiauth "github.com/harness/gitness/app/api/auth" + "github.com/harness/gitness/app/api/request" + "github.com/harness/gitness/registry/app/api/openapi/contracts/artifact" + "github.com/harness/gitness/registry/types" + "github.com/harness/gitness/types/enum" + + "github.com/opencontainers/go-digest" +) + +func (c *APIController) GetDockerArtifactManifest( + ctx context.Context, + r artifact.GetDockerArtifactManifestRequestObject, +) (artifact.GetDockerArtifactManifestResponseObject, error) { + regInfo, err := c.GetRegistryRequestBaseInfo(ctx, "", string(r.RegistryRef)) + if err != nil { + return artifact.GetDockerArtifactManifest400JSONResponse{ + BadRequestJSONResponse: artifact.BadRequestJSONResponse( + *GetErrorResponse(http.StatusBadRequest, err.Error()), + ), + }, nil + } + + space, err := c.spaceStore.FindByRef(ctx, regInfo.parentRef) + if err != nil { + return artifact.GetDockerArtifactManifest400JSONResponse{ + BadRequestJSONResponse: artifact.BadRequestJSONResponse( + *GetErrorResponse(http.StatusBadRequest, err.Error()), + ), + }, nil + } + + session, _ := request.AuthSessionFrom(ctx) + permissionChecks := getPermissionChecks(space, regInfo.RegistryIdentifier, enum.PermissionRegistryView) + if err = apiauth.CheckRegistry( + ctx, + c.Authorizer, + session, + permissionChecks..., + ); err != nil { + return artifact.GetDockerArtifactManifest403JSONResponse{ + UnauthorizedJSONResponse: artifact.UnauthorizedJSONResponse( + *GetErrorResponse(http.StatusForbidden, err.Error()), + ), + }, nil + } + + imageName := string(r.Artifact) + dgst := string(r.Params.Digest) + manifestDigest, err := types.NewDigest(digest.Digest(dgst)) + if err != nil { + return getArtifactManifestErrorResponse(err) + } + + manifestPayload, err := c.ManifestStore.GetManifestPayload( + ctx, + regInfo.parentID, + regInfo.RegistryIdentifier, + imageName, + manifestDigest, + ) + + if err != nil { + return getArtifactManifestErrorResponse(err) + } + + payload := *manifestPayload + return artifact.GetDockerArtifactManifest200JSONResponse{ + DockerArtifactManifestResponseJSONResponse: artifact.DockerArtifactManifestResponseJSONResponse{ + Data: artifact.DockerArtifactManifest{ + Manifest: string(payload), + }, + Status: artifact.StatusSUCCESS, + }, + }, nil +} + +func getArtifactManifestErrorResponse(err error) (artifact.GetDockerArtifactManifestResponseObject, error) { + return artifact.GetDockerArtifactManifest500JSONResponse{ + InternalServerErrorJSONResponse: artifact.InternalServerErrorJSONResponse( + *GetErrorResponse(http.StatusInternalServerError, err.Error()), + ), + }, nil +} diff --git a/registry/app/api/controller/metadata/get_artifacts_docker_manifests.go b/registry/app/api/controller/metadata/get_artifacts_docker_manifests.go new file mode 100644 index 000000000..80fa94011 --- /dev/null +++ b/registry/app/api/controller/metadata/get_artifacts_docker_manifests.go @@ -0,0 +1,165 @@ +// Copyright 2023 Harness, Inc. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package metadata
+
+import (
+ "context"
+ "database/sql"
+ "errors"
+ "fmt"
+ "net/http"
+
+ apiauth "github.com/harness/gitness/app/api/auth"
+ "github.com/harness/gitness/app/api/request"
+ "github.com/harness/gitness/registry/app/api/openapi/contracts/artifact"
+ ml "github.com/harness/gitness/registry/app/manifest/manifestlist"
+ os "github.com/harness/gitness/registry/app/manifest/ocischema"
+ s2 "github.com/harness/gitness/registry/app/manifest/schema2"
+ "github.com/harness/gitness/registry/app/pkg/docker"
+ "github.com/harness/gitness/registry/types"
+ store2 "github.com/harness/gitness/store"
+ "github.com/harness/gitness/types/enum"
+
+ "github.com/rs/zerolog/log"
+)
+
+func (c *APIController) GetDockerArtifactManifests(
+ ctx context.Context,
+ r artifact.GetDockerArtifactManifestsRequestObject,
+) (artifact.GetDockerArtifactManifestsResponseObject, error) {
+ regInfo, err := c.GetRegistryRequestBaseInfo(ctx, "", string(r.RegistryRef))
+ if err != nil {
+ return artifact.GetDockerArtifactManifests400JSONResponse{
+ BadRequestJSONResponse: artifact.BadRequestJSONResponse(
+ *GetErrorResponse(http.StatusBadRequest, err.Error()),
+ ),
+ }, nil
+ }
+
+ space, err := c.spaceStore.FindByRef(ctx, regInfo.parentRef)
+ if err != nil {
+ return artifact.GetDockerArtifactManifests400JSONResponse{
+ BadRequestJSONResponse: artifact.BadRequestJSONResponse(
+ *GetErrorResponse(http.StatusBadRequest, err.Error()),
+ ),
+ }, nil
+ }
+
+ session, _ := request.AuthSessionFrom(ctx)
+ permissionChecks := getPermissionChecks(space, regInfo.RegistryIdentifier, enum.PermissionRegistryView)
+ if err = apiauth.CheckRegistry(
+ ctx,
+ c.Authorizer,
+ session,
+ permissionChecks...,
+ ); err != nil {
+ return artifact.GetDockerArtifactManifests403JSONResponse{
+ UnauthorizedJSONResponse: artifact.UnauthorizedJSONResponse(
+ *GetErrorResponse(http.StatusForbidden, err.Error()),
+ ),
+ }, nil
+ }
+
+ image := string(r.Artifact)
+ version := string(r.Version)
+ registry, err := c.RegistryRepository.GetByParentIDAndName(ctx, regInfo.parentID, regInfo.RegistryIdentifier)
+ if err != nil {
+ return nil, err
+ }
+ t, err := c.TagStore.FindTag(ctx, registry.ID, image, version)
+ if err != nil && !errors.Is(err, sql.ErrNoRows) {
+ return nil, err
+ }
+ // FindTag tolerates sql.ErrNoRows, so guard against a missing tag before using it.
+ if t == nil {
+ return artifactManifestsErrorRs(fmt.Errorf("tag not found")), nil
+ }
+ m, err := c.ManifestStore.Get(ctx, t.ManifestID)
+ if err != nil {
+ return nil, err
+ }
+ manifest, err := docker.DBManifestToManifest(m)
+ if err != nil {
+ return artifactManifestsErrorRs(err), nil
+ }
+ manifestDetailsList := []artifact.DockerManifestDetails{}
+ switch reqManifest := manifest.(type) {
+ case *s2.DeserializedManifest:
+ mConfig, err := getManifestConfig(ctx, reqManifest.Config().Digest, regInfo.rootIdentifier, c.StorageDriver)
+ if err != nil {
+ return artifactManifestsErrorRs(err), nil
+ }
+ manifestDetailsList = append(manifestDetailsList, getManifestDetails(m, mConfig))
+ case *os.DeserializedManifest:
+ mConfig, err := getManifestConfig(ctx, reqManifest.Config().Digest, regInfo.rootIdentifier, c.StorageDriver)
+ if err != nil {
+ return artifactManifestsErrorRs(err), nil
+ }
+ manifestDetailsList = append(manifestDetailsList, getManifestDetails(m, mConfig))
+ case *ml.DeserializedManifestList:
+ for _, manifestEntry := range reqManifest.Manifests {
+ dgst, err := types.NewDigest(manifestEntry.Digest)
+ if err != nil {
+ return artifactManifestsErrorRs(err), nil
+ }
+ referencedManifest, err := c.ManifestStore.FindManifestByDigest(ctx, registry.ID, image, dgst)
+ if err != nil {
+ if errors.Is(err, 
store2.ErrResourceNotFound) { + return artifactManifestsErrorRs( + fmt.Errorf("manifest not found"), + ), nil + } + return artifactManifestsErrorRs(err), nil + } + mConfig, err := getManifestConfig( + ctx, referencedManifest.Configuration.Digest, + regInfo.rootIdentifier, c.StorageDriver, + ) + if err != nil { + return artifactManifestsErrorRs(err), nil + } + manifestDetailsList = append(manifestDetailsList, getManifestDetails(referencedManifest, mConfig)) + } + default: + log.Ctx(ctx).Error().Stack().Err(err).Msgf("Unknown manifest type: %T", manifest) + } + + return artifact.GetDockerArtifactManifests200JSONResponse{ + DockerManifestsResponseJSONResponse: artifact.DockerManifestsResponseJSONResponse{ + Data: artifact.DockerManifests{ + ImageName: t.ImageName, + Version: t.Name, + Manifests: &manifestDetailsList, + }, + Status: artifact.StatusSUCCESS, + }, + }, nil +} + +func artifactManifestsErrorRs(err error) artifact.GetDockerArtifactManifestsResponseObject { + return artifact.GetDockerArtifactManifests500JSONResponse{ + InternalServerErrorJSONResponse: artifact.InternalServerErrorJSONResponse( + *GetErrorResponse(http.StatusInternalServerError, err.Error()), + ), + } +} + +func getManifestDetails(m *types.Manifest, mConfig *manifestConfig) artifact.DockerManifestDetails { + createdAt := GetTimeInMs(m.CreatedAt) + size := GetSize(m.TotalSize) + + manifestDetails := artifact.DockerManifestDetails{ + Digest: m.Digest.String(), + CreatedAt: &createdAt, + Size: &size, + } + if mConfig != nil { + manifestDetails.OsArch = fmt.Sprintf("%s/%s", mConfig.Os, mConfig.Arch) + } + return manifestDetails +} diff --git a/registry/app/api/controller/metadata/get_artifacts_helm_details.go b/registry/app/api/controller/metadata/get_artifacts_helm_details.go new file mode 100644 index 000000000..c8de73038 --- /dev/null +++ b/registry/app/api/controller/metadata/get_artifacts_helm_details.go @@ -0,0 +1,108 @@ +// Copyright 2023 Harness, Inc. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
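+// GetHelmArtifactDetails (below) mirrors the Docker details handler for Helm
+// charts: it loads the tag detail and the manifest by tag name, and marks the
+// version as latest by comparing it against the registry's latest tag.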
+
+package metadata
+
+import (
+ "context"
+ "errors"
+ "fmt"
+ "net/http"
+
+ apiauth "github.com/harness/gitness/app/api/auth"
+ "github.com/harness/gitness/app/api/request"
+ "github.com/harness/gitness/registry/app/api/openapi/contracts/artifact"
+ store2 "github.com/harness/gitness/store"
+ "github.com/harness/gitness/types/enum"
+)
+
+func (c *APIController) GetHelmArtifactDetails(
+ ctx context.Context,
+ r artifact.GetHelmArtifactDetailsRequestObject,
+) (artifact.GetHelmArtifactDetailsResponseObject, error) {
+ regInfo, err := c.GetRegistryRequestBaseInfo(ctx, "", string(r.RegistryRef))
+ if err != nil {
+ return artifact.GetHelmArtifactDetails400JSONResponse{
+ BadRequestJSONResponse: artifact.BadRequestJSONResponse(
+ *GetErrorResponse(http.StatusBadRequest, err.Error()),
+ ),
+ }, nil
+ }
+ space, err := c.spaceStore.FindByRef(ctx, regInfo.parentRef)
+ if err != nil {
+ return artifact.GetHelmArtifactDetails400JSONResponse{
+ BadRequestJSONResponse: artifact.BadRequestJSONResponse(
+ *GetErrorResponse(http.StatusBadRequest, err.Error()),
+ ),
+ }, nil
+ }
+
+ session, _ := request.AuthSessionFrom(ctx)
+ permissionChecks := getPermissionChecks(space, regInfo.RegistryIdentifier, enum.PermissionRegistryView)
+ if err = apiauth.CheckRegistry(
+ ctx,
+ c.Authorizer,
+ session,
+ permissionChecks...,
+ ); err != nil {
+ return artifact.GetHelmArtifactDetails403JSONResponse{
+ UnauthorizedJSONResponse: artifact.UnauthorizedJSONResponse(
+ *GetErrorResponse(http.StatusForbidden, err.Error()),
+ ),
+ }, nil
+ }
+
+ image := string(r.Artifact)
+ version := string(r.Version)
+
+ registry, err := c.RegistryRepository.GetByParentIDAndName(ctx, regInfo.parentID, regInfo.RegistryIdentifier)
+
+ if err != nil {
+ return artifact.GetHelmArtifactDetails500JSONResponse{
+ InternalServerErrorJSONResponse: artifact.InternalServerErrorJSONResponse(
+ *GetErrorResponse(http.StatusInternalServerError, err.Error()),
+ ),
+ }, nil
+ }
+
+ tag, err := c.TagStore.GetTagDetail(ctx, registry.ID, image, version)
+ if err != nil {
+ return getHelmArtifactDetailsErrResponse(err)
+ }
+ m, err := c.ManifestStore.FindManifestByTagName(ctx, registry.ID, image, version)
+
+ if err != nil {
+ if errors.Is(err, store2.ErrResourceNotFound) {
+ return getHelmArtifactDetailsErrResponse(fmt.Errorf("manifest not found"))
+ }
+ return getHelmArtifactDetailsErrResponse(err)
+ }
+
+ // Check the error here: a nil latestTag would panic on the ID comparison below.
+ latestTag, err := c.TagStore.GetLatestTag(ctx, registry.ID, image)
+ if err != nil {
+ return getHelmArtifactDetailsErrResponse(err)
+ }
+
+ return artifact.GetHelmArtifactDetails200JSONResponse{
+ HelmArtifactDetailResponseJSONResponse: *GetHelmArtifactDetails(
+ registry, tag, m,
+ latestTag.ID == tag.ID, regInfo.rootIdentifier, c.URLProvider.RegistryURL(),
+ ),
+ }, nil
+}
+
+func getHelmArtifactDetailsErrResponse(err error) (artifact.GetHelmArtifactDetailsResponseObject, error) {
+ return artifact.GetHelmArtifactDetails500JSONResponse{
+ InternalServerErrorJSONResponse: artifact.InternalServerErrorJSONResponse(
+ *GetErrorResponse(http.StatusInternalServerError, err.Error()),
+ ),
+ }, nil
+}
diff --git a/registry/app/api/controller/metadata/get_artifacts_helm_manifest.go b/registry/app/api/controller/metadata/get_artifacts_helm_manifest.go
new file mode 100644
index 000000000..c83ee34a7
--- /dev/null
+++ b/registry/app/api/controller/metadata/get_artifacts_helm_manifest.go
@@ -0,0 +1,105 @@
+// Copyright 2023 Harness, Inc.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License. 
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package metadata + +import ( + "context" + "errors" + "net/http" + + apiauth "github.com/harness/gitness/app/api/auth" + "github.com/harness/gitness/app/api/request" + "github.com/harness/gitness/registry/app/api/openapi/contracts/artifact" + store2 "github.com/harness/gitness/store" + "github.com/harness/gitness/types/enum" +) + +func (c *APIController) GetHelmArtifactManifest( + ctx context.Context, + r artifact.GetHelmArtifactManifestRequestObject, +) (artifact.GetHelmArtifactManifestResponseObject, error) { + regInfo, err := c.GetRegistryRequestBaseInfo(ctx, "", string(r.RegistryRef)) + if err != nil { + return c.get400Error(err) + } + + space, err := c.spaceStore.FindByRef(ctx, regInfo.parentRef) + if err != nil { + return artifact.GetHelmArtifactManifest400JSONResponse{ + BadRequestJSONResponse: artifact.BadRequestJSONResponse( + *GetErrorResponse(http.StatusBadRequest, err.Error()), + ), + }, nil + } + + session, _ := request.AuthSessionFrom(ctx) + permissionChecks := getPermissionChecks(space, regInfo.RegistryIdentifier, enum.PermissionRegistryView) + if err = apiauth.CheckRegistry( + ctx, + c.Authorizer, + session, + permissionChecks..., + ); err != nil { + return artifact.GetHelmArtifactManifest403JSONResponse{ + UnauthorizedJSONResponse: artifact.UnauthorizedJSONResponse( + *GetErrorResponse(http.StatusForbidden, err.Error()), + ), + }, nil + } + + imageName := string(r.Artifact) + version := string(r.Version) + + manifestPayload, err := c.ManifestStore.FindManifestPayloadByTagName( + ctx, + regInfo.parentID, + regInfo.RegistryIdentifier, + imageName, + version, + ) + + if err != nil { + if errors.Is(err, store2.ErrResourceNotFound) { + return artifact.GetHelmArtifactManifest400JSONResponse{ + BadRequestJSONResponse: artifact.BadRequestJSONResponse( + *GetErrorResponse(http.StatusBadRequest, err.Error()), + ), + }, nil + } + return artifact.GetHelmArtifactManifest500JSONResponse{ + InternalServerErrorJSONResponse: artifact.InternalServerErrorJSONResponse( + *GetErrorResponse(http.StatusInternalServerError, err.Error()), + ), + }, nil + } + + payload := *manifestPayload + return artifact.GetHelmArtifactManifest200JSONResponse{ + HelmArtifactManifestResponseJSONResponse: artifact.HelmArtifactManifestResponseJSONResponse{ + Data: artifact.HelmArtifactManifest{ + Manifest: string(payload), + }, + Status: artifact.StatusSUCCESS, + }, + }, nil +} + +func (c *APIController) get400Error(err error) (artifact.GetHelmArtifactManifestResponseObject, error) { + return artifact.GetHelmArtifactManifest400JSONResponse{ + BadRequestJSONResponse: artifact.BadRequestJSONResponse( + *GetErrorResponse(http.StatusBadRequest, err.Error()), + ), + }, nil +} diff --git a/registry/app/api/controller/metadata/get_artifacts_labels.go b/registry/app/api/controller/metadata/get_artifacts_labels.go new file mode 100644 index 000000000..70ea278e3 --- /dev/null +++ b/registry/app/api/controller/metadata/get_artifacts_labels.go @@ -0,0 +1,83 @@ +// Copyright 2023 Harness, Inc. 
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package metadata
+
+import (
+ "context"
+ "net/http"
+
+ apiauth "github.com/harness/gitness/app/api/auth"
+ "github.com/harness/gitness/app/api/request"
+ "github.com/harness/gitness/registry/app/api/openapi/contracts/artifact"
+ "github.com/harness/gitness/types/enum"
+)
+
+func (c *APIController) ListArtifactLabels(
+ ctx context.Context,
+ r artifact.ListArtifactLabelsRequestObject,
+) (artifact.ListArtifactLabelsResponseObject, error) {
+ // Report a bad registry ref as a 400 instead of dereferencing a nil regInfo below.
+ regInfo, err := c.GetRegistryRequestInfo(
+ ctx, nil, r.Params.Page, r.Params.Size,
+ r.Params.SearchTerm, ArtifactResource, "", string(r.RegistryRef),
+ nil, nil, nil,
+ )
+ if err != nil {
+ return artifact.ListArtifactLabels400JSONResponse{
+ BadRequestJSONResponse: artifact.BadRequestJSONResponse(
+ *GetErrorResponse(http.StatusBadRequest, err.Error()),
+ ),
+ }, nil
+ }
+
+ space, err := c.spaceStore.FindByRef(ctx, regInfo.parentRef)
+ if err != nil {
+ return artifact.ListArtifactLabels400JSONResponse{
+ BadRequestJSONResponse: artifact.BadRequestJSONResponse(
+ *GetErrorResponse(http.StatusBadRequest, err.Error()),
+ ),
+ }, nil
+ }
+
+ session, _ := request.AuthSessionFrom(ctx)
+ permissionChecks := getPermissionChecks(space, regInfo.RegistryIdentifier, enum.PermissionRegistryView)
+ if err = apiauth.CheckRegistry(
+ ctx,
+ c.Authorizer,
+ session,
+ permissionChecks...,
+ ); err != nil {
+ return artifact.ListArtifactLabels403JSONResponse{
+ UnauthorizedJSONResponse: artifact.UnauthorizedJSONResponse(
+ *GetErrorResponse(http.StatusForbidden, err.Error()),
+ ),
+ }, nil
+ }
+
+ labels, err := c.ArtifactStore.GetLabelsByParentIDAndRepo(
+ ctx, regInfo.parentID,
+ regInfo.RegistryIdentifier, regInfo.limit, regInfo.offset, regInfo.searchTerm,
+ )
+ count, _ := c.ArtifactStore.CountLabelsByParentIDAndRepo(
+ ctx, regInfo.parentID,
+ regInfo.RegistryIdentifier, regInfo.searchTerm,
+ )
+
+ if err != nil {
+ return artifact.ListArtifactLabels500JSONResponse{
+ InternalServerErrorJSONResponse: artifact.InternalServerErrorJSONResponse(
+ *GetErrorResponse(http.StatusInternalServerError, err.Error()),
+ ),
+ }, nil
+ }
+ return artifact.ListArtifactLabels200JSONResponse{
+ ListArtifactLabelResponseJSONResponse: *GetAllArtifactLabelsResponse(
+ &labels, count,
+ regInfo.pageNumber, regInfo.limit,
+ ),
+ }, nil
+}
diff --git a/registry/app/api/controller/metadata/get_artifacts_summary.go b/registry/app/api/controller/metadata/get_artifacts_summary.go
new file mode 100644
index 000000000..0ab3377d8
--- /dev/null
+++ b/registry/app/api/controller/metadata/get_artifacts_summary.go
@@ -0,0 +1,78 @@
+// Copyright 2023 Harness, Inc.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License. 
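+// GetArtifactSummary (below) builds an image-level summary from the metadata
+// of the artifact's latest tag in the registry.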
+ +package metadata + +import ( + "context" + "net/http" + + apiauth "github.com/harness/gitness/app/api/auth" + "github.com/harness/gitness/app/api/request" + "github.com/harness/gitness/registry/app/api/openapi/contracts/artifact" + "github.com/harness/gitness/types/enum" +) + +func (c *APIController) GetArtifactSummary( + ctx context.Context, + r artifact.GetArtifactSummaryRequestObject, +) (artifact.GetArtifactSummaryResponseObject, error) { + regInfo, err := c.GetRegistryRequestBaseInfo(ctx, "", string(r.RegistryRef)) + if err != nil { + return artifact.GetArtifactSummary400JSONResponse{ + BadRequestJSONResponse: artifact.BadRequestJSONResponse( + *GetErrorResponse(http.StatusBadRequest, err.Error()), + ), + }, nil + } + + space, err := c.spaceStore.FindByRef(ctx, regInfo.parentRef) + if err != nil { + return artifact.GetArtifactSummary400JSONResponse{ + BadRequestJSONResponse: artifact.BadRequestJSONResponse( + *GetErrorResponse(http.StatusBadRequest, err.Error()), + ), + }, nil + } + + session, _ := request.AuthSessionFrom(ctx) + permissionChecks := getPermissionChecks(space, regInfo.RegistryIdentifier, enum.PermissionRegistryView) + if err = apiauth.CheckRegistry( + ctx, + c.Authorizer, + session, + permissionChecks..., + ); err != nil { + return artifact.GetArtifactSummary403JSONResponse{ + UnauthorizedJSONResponse: artifact.UnauthorizedJSONResponse( + *GetErrorResponse(http.StatusForbidden, err.Error()), + ), + }, nil + } + + image := string(r.Artifact) + + tag, err := c.TagStore.GetLatestTagMetadata(ctx, regInfo.parentID, regInfo.RegistryIdentifier, image) + + if err != nil { + return artifact.GetArtifactSummary500JSONResponse{ + InternalServerErrorJSONResponse: artifact.InternalServerErrorJSONResponse( + *GetErrorResponse(http.StatusInternalServerError, err.Error()), + ), + }, nil + } + return artifact.GetArtifactSummary200JSONResponse{ + ArtifactSummaryResponseJSONResponse: *GetArtifactSummary(*tag), + }, nil +} diff --git a/registry/app/api/controller/metadata/get_artifacts_version_summary.go b/registry/app/api/controller/metadata/get_artifacts_version_summary.go new file mode 100644 index 000000000..66fbf9597 --- /dev/null +++ b/registry/app/api/controller/metadata/get_artifacts_version_summary.go @@ -0,0 +1,77 @@ +// Copyright 2023 Harness, Inc. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
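+// GetArtifactVersionSummary (below) returns tag metadata for one specific
+// version and reports whether that version is currently the latest tag.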
+
+package metadata
+
+import (
+ "context"
+ "net/http"
+
+ apiauth "github.com/harness/gitness/app/api/auth"
+ "github.com/harness/gitness/app/api/request"
+ "github.com/harness/gitness/registry/app/api/openapi/contracts/artifact"
+ "github.com/harness/gitness/types/enum"
+)
+
+func (c *APIController) GetArtifactVersionSummary(
+ ctx context.Context,
+ r artifact.GetArtifactVersionSummaryRequestObject,
+) (artifact.GetArtifactVersionSummaryResponseObject, error) {
+ // Report a bad registry ref as a 400 instead of dereferencing a nil regInfo below.
+ regInfo, err := c.GetRegistryRequestBaseInfo(ctx, "", string(r.RegistryRef))
+ if err != nil {
+ return artifact.GetArtifactVersionSummary400JSONResponse{
+ BadRequestJSONResponse: artifact.BadRequestJSONResponse(
+ *GetErrorResponse(http.StatusBadRequest, err.Error()),
+ ),
+ }, nil
+ }
+
+ space, err := c.spaceStore.FindByRef(ctx, regInfo.parentRef)
+ if err != nil {
+ return artifact.GetArtifactVersionSummary400JSONResponse{
+ BadRequestJSONResponse: artifact.BadRequestJSONResponse(
+ *GetErrorResponse(http.StatusBadRequest, err.Error()),
+ ),
+ }, nil
+ }
+
+ session, _ := request.AuthSessionFrom(ctx)
+ permissionChecks := getPermissionChecks(space, regInfo.RegistryIdentifier, enum.PermissionRegistryView)
+ if err = apiauth.CheckRegistry(
+ ctx,
+ c.Authorizer,
+ session,
+ permissionChecks...,
+ ); err != nil {
+ return artifact.GetArtifactVersionSummary403JSONResponse{
+ UnauthorizedJSONResponse: artifact.UnauthorizedJSONResponse(
+ *GetErrorResponse(http.StatusForbidden, err.Error()),
+ ),
+ }, nil
+ }
+
+ image := string(r.Artifact)
+ version := string(r.Version)
+
+ tag, err := c.TagStore.GetTagMetadata(ctx, regInfo.parentID, regInfo.RegistryIdentifier, image, version)
+
+ if err != nil {
+ return artifact.GetArtifactVersionSummary500JSONResponse{
+ InternalServerErrorJSONResponse: artifact.InternalServerErrorJSONResponse(
+ *GetErrorResponse(http.StatusInternalServerError, err.Error()),
+ ),
+ }, nil
+ }
+
+ latestTag, _ := c.TagStore.GetLatestTagName(ctx, regInfo.parentID, regInfo.RegistryIdentifier, image)
+
+ isLatestTag := latestTag == version
+
+ return artifact.GetArtifactVersionSummary200JSONResponse{
+ ArtifactVersionSummaryResponseJSONResponse: *GetArtifactVersionSummary(tag, image, isLatestTag),
+ }, nil
+}
diff --git a/registry/app/api/controller/metadata/get_artifacts_versions.go b/registry/app/api/controller/metadata/get_artifacts_versions.go
new file mode 100644
index 000000000..5dd9e230c
--- /dev/null
+++ b/registry/app/api/controller/metadata/get_artifacts_versions.go
@@ -0,0 +1,89 @@
+// Copyright 2023 Harness, Inc.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License. 
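+// GetAllArtifactVersions (below) lists the tags of an image with the paging,
+// search, and sort parameters carried in the parsed registry request info.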
+
+package metadata
+
+import (
+ "context"
+ "net/http"
+
+ apiauth "github.com/harness/gitness/app/api/auth"
+ "github.com/harness/gitness/app/api/request"
+ "github.com/harness/gitness/registry/app/api/openapi/contracts/artifact"
+ "github.com/harness/gitness/types/enum"
+)
+
+func (c *APIController) GetAllArtifactVersions(
+ ctx context.Context,
+ r artifact.GetAllArtifactVersionsRequestObject,
+) (artifact.GetAllArtifactVersionsResponseObject, error) {
+ // Report a bad registry ref as a 400 instead of dereferencing a nil regInfo below.
+ regInfo, err := c.GetRegistryRequestInfo(
+ ctx, nil, r.Params.Page, r.Params.Size,
+ r.Params.SearchTerm, ArtifactVersionResource, "", string(r.RegistryRef),
+ nil, r.Params.SortOrder, r.Params.SortField,
+ )
+ if err != nil {
+ return artifact.GetAllArtifactVersions400JSONResponse{
+ BadRequestJSONResponse: artifact.BadRequestJSONResponse(
+ *GetErrorResponse(http.StatusBadRequest, err.Error()),
+ ),
+ }, nil
+ }
+
+ space, err := c.spaceStore.FindByRef(ctx, regInfo.parentRef)
+ if err != nil {
+ return artifact.GetAllArtifactVersions400JSONResponse{
+ BadRequestJSONResponse: artifact.BadRequestJSONResponse(
+ *GetErrorResponse(http.StatusBadRequest, err.Error()),
+ ),
+ }, nil
+ }
+
+ session, _ := request.AuthSessionFrom(ctx)
+ permissionChecks := getPermissionChecks(space, regInfo.RegistryIdentifier, enum.PermissionRegistryView)
+ if err = apiauth.CheckRegistry(
+ ctx,
+ c.Authorizer,
+ session,
+ permissionChecks...,
+ ); err != nil {
+ return artifact.GetAllArtifactVersions403JSONResponse{
+ UnauthorizedJSONResponse: artifact.UnauthorizedJSONResponse(
+ *GetErrorResponse(http.StatusForbidden, err.Error()),
+ ),
+ }, nil
+ }
+
+ image := string(r.Artifact)
+
+ tags, err := c.TagStore.GetAllTagsByRepoAndImage(
+ ctx, regInfo.parentID, regInfo.RegistryIdentifier,
+ image, regInfo.sortByField, regInfo.sortByOrder, regInfo.limit, regInfo.offset, regInfo.searchTerm,
+ )
+
+ latestTag, _ := c.TagStore.GetLatestTagName(ctx, regInfo.parentID, regInfo.RegistryIdentifier, image)
+
+ count, _ := c.TagStore.CountAllTagsByRepoAndImage(
+ ctx, regInfo.parentID, regInfo.RegistryIdentifier,
+ image, regInfo.searchTerm,
+ )
+
+ if err != nil {
+ return artifact.GetAllArtifactVersions500JSONResponse{
+ InternalServerErrorJSONResponse: artifact.InternalServerErrorJSONResponse(
+ *GetErrorResponse(http.StatusInternalServerError, err.Error()),
+ ),
+ }, nil
+ }
+
+ return artifact.GetAllArtifactVersions200JSONResponse{
+ ListArtifactVersionResponseJSONResponse: *GetAllArtifactVersionResponse(
+ ctx, tags, latestTag, image, count,
+ regInfo, regInfo.pageNumber, regInfo.limit, regInfo.rootIdentifier, c.URLProvider.RegistryURL(),
+ ),
+ }, nil
+}
diff --git a/registry/app/api/controller/metadata/get_client_setup_details.go b/registry/app/api/controller/metadata/get_client_setup_details.go
new file mode 100644
index 000000000..e471d8a91
--- /dev/null
+++ b/registry/app/api/controller/metadata/get_client_setup_details.go
@@ -0,0 +1,317 @@
+// Copyright 2023 Harness, Inc.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License. 
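+// GetClientSetupDetails (below) assembles step-by-step CLI instructions for
+// the registry's package type (Docker or Helm). Commands are emitted with
+// placeholder tokens that replacePlaceholders substitutes with the caller's
+// identity and the resolved registry coordinates. For illustration only, with
+// hypothetical values, a rendered Docker login section would read:
+//
+// docker login pkg.example.io
+// Username: jane@example.com
+// Password: *see step 2*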
+
+package metadata
+
+import (
+ "context"
+ "net/http"
+ "strings"
+
+ apiauth "github.com/harness/gitness/app/api/auth"
+ "github.com/harness/gitness/app/api/request"
+ "github.com/harness/gitness/registry/app/api/openapi/contracts/artifact"
+ "github.com/harness/gitness/registry/app/common"
+ "github.com/harness/gitness/registry/types"
+ "github.com/harness/gitness/types/enum"
+)
+
+func (c *APIController) GetClientSetupDetails(
+ ctx context.Context,
+ r artifact.GetClientSetupDetailsRequestObject,
+) (artifact.GetClientSetupDetailsResponseObject, error) {
+ regRefParam := r.RegistryRef
+ imageParam := r.Params.Artifact
+ tagParam := r.Params.Version
+
+ // Report a bad registry ref as a 400 instead of dereferencing a nil regInfo below.
+ regInfo, err := c.GetRegistryRequestBaseInfo(ctx, "", string(regRefParam))
+ if err != nil {
+ return artifact.GetClientSetupDetails400JSONResponse{
+ BadRequestJSONResponse: artifact.BadRequestJSONResponse(
+ *GetErrorResponse(http.StatusBadRequest, err.Error()),
+ ),
+ }, nil
+ }
+
+ space, err := c.spaceStore.FindByRef(ctx, regInfo.parentRef)
+ if err != nil {
+ return artifact.GetClientSetupDetails400JSONResponse{
+ BadRequestJSONResponse: artifact.BadRequestJSONResponse(
+ *GetErrorResponse(http.StatusBadRequest, err.Error()),
+ ),
+ }, nil
+ }
+
+ session, _ := request.AuthSessionFrom(ctx)
+ permissionChecks := getPermissionChecks(space, regInfo.RegistryIdentifier, enum.PermissionRegistryView)
+ if err = apiauth.CheckRegistry(
+ ctx,
+ c.Authorizer,
+ session,
+ permissionChecks...,
+ ); err != nil {
+ return artifact.GetClientSetupDetails403JSONResponse{
+ UnauthorizedJSONResponse: artifact.UnauthorizedJSONResponse(
+ *GetErrorResponse(http.StatusForbidden, err.Error()),
+ ),
+ }, nil
+ }
+
+ reg, err := c.RegistryRepository.GetByParentIDAndName(ctx, regInfo.parentID, regInfo.RegistryIdentifier)
+ if err != nil {
+ return artifact.GetClientSetupDetails404JSONResponse{
+ NotFoundJSONResponse: artifact.NotFoundJSONResponse(
+ *GetErrorResponse(http.StatusNotFound, "registry doesn't exist with this ref"),
+ ),
+ }, err
+ }
+
+ if imageParam != nil {
+ _, err := c.ArtifactStore.GetByName(ctx, reg.ID, string(*imageParam))
+ if err != nil {
+ return artifact.GetClientSetupDetails404JSONResponse{
+ NotFoundJSONResponse: artifact.NotFoundJSONResponse(
+ *GetErrorResponse(http.StatusNotFound, "image doesn't exist"),
+ ),
+ }, err
+ }
+ if tagParam != nil {
+ _, err := c.TagStore.FindTag(ctx, reg.ID, string(*imageParam), string(*tagParam))
+ if err != nil {
+ return artifact.GetClientSetupDetails404JSONResponse{
+ NotFoundJSONResponse: artifact.NotFoundJSONResponse(
+ *GetErrorResponse(http.StatusNotFound, "tag doesn't exist"),
+ ),
+ }, err
+ }
+ }
+ }
+
+ packageType := string(reg.PackageType)
+
+ return artifact.GetClientSetupDetails200JSONResponse{
+ ClientSetupDetailsResponseJSONResponse: *GetClientSetupDetails(
+ ctx, packageType, regInfo, reg,
+ string(r.RegistryRef), imageParam, tagParam, c.URLProvider.RegistryURL(),
+ ),
+ }, nil
+}
+
+func GetClientSetupDetails(
+ ctx context.Context,
+ packageType string,
+ _ *RegistryRequestBaseInfo,
+ _ *types.Registry,
+ regRef string,
+ image *artifact.ArtifactParam,
+ tag *artifact.VersionParam,
+ registryURL string,
+) *artifact.ClientSetupDetailsResponseJSONResponse {
+ session, _ := request.AuthSessionFrom(ctx)
+ username := session.Principal.Email
+ hostname := common.GenerateSetupClientHostname(registryURL)
+ regRef = strings.ToLower(regRef)
+
+ // Fixme: Use ENUMS
+ if packageType == "HELM" {
+ header1 := "Login to Helm"
+ section1step1Header := "Run this Helm command in your terminal to authenticate the client."
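+ // Placeholder tokens such as <HOSTNAME> and <REGISTRY_NAME> below are
+ // substituted by replacePlaceholders before the response is returned.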
+ section1step1Commands := []string{"helm registry login <HOSTNAME>"}
+ section1step1Type := artifact.ClientSetupStepTypeStatic
+ section1step2Header := "For the Password field above, generate an identity token"
+ section1step2Type := artifact.ClientSetupStepTypeGenerateToken
+ section1 := []artifact.ClientSetupStep{
+ {
+ Header: &section1step1Header,
+ Commands: &section1step1Commands,
+ Type: &section1step1Type,
+ },
+ {
+ Header: &section1step2Header,
+ Type: &section1step2Type,
+ },
+ }
+
+ header2 := "Push a version"
+ section2step1Header := "Run this Helm push command in your terminal to push a chart in OCI form." +
+ " Note: Make sure you add oci:// prefix to the repository URL."
+ section2step1Commands := []string{"helm push <CHART_TGZ_FILE> oci://<HOSTNAME>/<REGISTRY_NAME>"}
+ section2step1Type := artifact.ClientSetupStepTypeStatic
+ section2 := []artifact.ClientSetupStep{
+ {
+ Header: &section2step1Header,
+ Commands: &section2step1Commands,
+ Type: &section2step1Type,
+ },
+ }
+
+ header3 := "Pull a version"
+ section3step1Header := "Run this Helm command in your terminal to pull a specific chart version."
+ section3step1Commands := []string{
+ "helm pull oci://<HOSTNAME>/<REGISTRY_NAME>/<IMAGE_NAME> --version <TAG>",
+ }
+ section3step1Type := artifact.ClientSetupStepTypeStatic
+ section3 := []artifact.ClientSetupStep{
+ {
+ Header: &section3step1Header,
+ Commands: &section3step1Commands,
+ Type: &section3step1Type,
+ },
+ }
+ clientSetupDetails := artifact.ClientSetupDetails{
+ MainHeader: "Helm Client Setup",
+ SecHeader: "Follow these instructions to install/use Helm artifacts or compatible packages.",
+ Sections: []artifact.ClientSetupSection{
+ {
+ Header: &header1,
+ Steps: &section1,
+ },
+ {
+ Header: &header2,
+ Steps: &section2,
+ },
+ {
+ Header: &header3,
+ Steps: &section3,
+ },
+ },
+ }
+
+ replacePlaceholders(clientSetupDetails, username, hostname, regRef, image, tag)
+
+ return &artifact.ClientSetupDetailsResponseJSONResponse{
+ Data: clientSetupDetails,
+ Status: artifact.StatusSUCCESS,
+ }
+ }
+ header1 := "Login to Docker"
+ section1step1Header := "Run this Docker command in your terminal to authenticate the client."
+ section1step1Commands := []string{"docker login <HOSTNAME>", "Username: <USERNAME>", "Password: *see step 2*"}
+ section1step1Type := artifact.ClientSetupStepTypeStatic
+ section1step2Header := "For the Password field above, generate an identity token"
+ section1step2Type := artifact.ClientSetupStepTypeGenerateToken
+ section1 := []artifact.ClientSetupStep{
+ {
+ Header: &section1step1Header,
+ Commands: &section1step1Commands,
+ Type: &section1step1Type,
+ },
+ {
+ Header: &section1step2Header,
+ Type: &section1step2Type,
+ },
+ }
+ header2 := "Pull an image"
+ section2step1Header := "Run this Docker command in your terminal to pull image."
+ section2step1Commands := []string{"docker pull <HOSTNAME>/<REGISTRY_NAME>/<IMAGE_NAME>:<TAG>"}
+ section2step1Type := artifact.ClientSetupStepTypeStatic
+ section2 := []artifact.ClientSetupStep{
+ {
+ Header: &section2step1Header,
+ Commands: &section2step1Commands,
+ Type: &section2step1Type,
+ },
+ }
+ header3 := "Retag and Push the image"
+ section3step1Header := "Run this Docker command in your terminal to tag the image."
+ section3step1Commands := []string{
+ "docker tag <HOSTNAME>/<REGISTRY_NAME>/<IMAGE_NAME>" +
+ " <HOSTNAME>/<REGISTRY_NAME>/<IMAGE_NAME>:<TAG>",
+ }
+ section3step1Type := artifact.ClientSetupStepTypeStatic
+ section3step2Header := "Run this Docker command in your terminal to push the image."
+ section3step2Commands := []string{"docker push <HOSTNAME>/<REGISTRY_NAME>/<IMAGE_NAME>:<TAG>"}
+ section3step2Type := artifact.ClientSetupStepTypeStatic
+ section3 := []artifact.ClientSetupStep{
+ {
+ Header: &section3step1Header,
+ Commands: &section3step1Commands,
+ Type: &section3step1Type,
+ },
+ {
+ Header: &section3step2Header,
+ Commands: &section3step2Commands,
+ Type: &section3step2Type,
+ },
+ }
+ clientSetupDetails := artifact.ClientSetupDetails{
+ MainHeader: "Docker Client Setup",
+ SecHeader: "Follow these instructions to install/use Docker artifacts or compatible packages.",
+ Sections: []artifact.ClientSetupSection{
+ {
+ Header: &header1,
+ Steps: &section1,
+ },
+ {
+ Header: &header2,
+ Steps: &section2,
+ },
+ {
+ Header: &header3,
+ Steps: &section3,
+ },
+ },
+ }
+
+ replacePlaceholders(clientSetupDetails, username, hostname, regRef, image, tag)
+
+ return &artifact.ClientSetupDetailsResponseJSONResponse{
+ Data: clientSetupDetails,
+ Status: artifact.StatusSUCCESS,
+ }
+}
+
+func replacePlaceholders(
+ clientSetupDetails artifact.ClientSetupDetails, username string, hostname string,
+ regRef string, image *artifact.ArtifactParam, tag *artifact.VersionParam,
+) {
+ for _, s := range clientSetupDetails.Sections {
+ if s.Steps == nil {
+ continue
+ }
+ for _, st := range *s.Steps {
+ if st.Commands == nil {
+ continue
+ }
+ for i := range *st.Commands {
+ replaceText(username, st, i, hostname, regRef, image, tag)
+ }
+ }
+ }
+}
+
+func replaceText(
+ username string,
+ st artifact.ClientSetupStep,
+ i int,
+ hostname string,
+ regRef string,
+ image *artifact.ArtifactParam,
+ tag *artifact.VersionParam,
+) {
+ if username != "" {
+ (*st.Commands)[i] = strings.ReplaceAll((*st.Commands)[i], "<USERNAME>", username)
+ }
+ if hostname != "" {
+ (*st.Commands)[i] = strings.ReplaceAll((*st.Commands)[i], "<HOSTNAME>", hostname)
+ }
+ if regRef != "" {
+ (*st.Commands)[i] = strings.ReplaceAll(
+ (*st.Commands)[i],
+ "<REGISTRY_NAME>", regRef,
+ )
+ }
+ if image != nil {
+ (*st.Commands)[i] = strings.ReplaceAll(
+ (*st.Commands)[i],
+ "<IMAGE_NAME>", string(*image),
+ )
+ }
+ if tag != nil {
+ (*st.Commands)[i] = strings.ReplaceAll((*st.Commands)[i], "<TAG>", string(*tag))
+ }
+}
diff --git a/registry/app/api/controller/metadata/get_registries.go b/registry/app/api/controller/metadata/get_registries.go
new file mode 100644
index 000000000..b1fbe8043
--- /dev/null
+++ b/registry/app/api/controller/metadata/get_registries.go
@@ -0,0 +1,179 @@
+// Copyright 2023 Harness, Inc.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License. 
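+// GetAllRegistries (below) returns a paginated listing of the registries in a
+// space, optionally filtered by package type and registry type, and maps each
+// row onto the artifact.RegistryMetadata contract.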
+
+package metadata
+
+import (
+ "context"
+ "net/http"
+
+ apiauth "github.com/harness/gitness/app/api/auth"
+ "github.com/harness/gitness/app/api/request"
+ "github.com/harness/gitness/registry/app/api/openapi/contracts/artifact"
+ "github.com/harness/gitness/registry/app/pkg/commons"
+ "github.com/harness/gitness/registry/app/store"
+ "github.com/harness/gitness/types/enum"
+
+ "github.com/gotidy/ptr"
+)
+
+func (c *APIController) GetAllRegistries(
+ ctx context.Context,
+ r artifact.GetAllRegistriesRequestObject,
+) (artifact.GetAllRegistriesResponseObject, error) {
+ // Report a bad space ref as a 400 instead of dereferencing a nil regInfo below.
+ regInfo, err := c.GetRegistryRequestInfo(
+ ctx, r.Params.PackageType, r.Params.Page, r.Params.Size,
+ r.Params.SearchTerm, RepositoryResource, string(r.SpaceRef), "", nil,
+ r.Params.SortOrder, r.Params.SortField,
+ )
+ if err != nil {
+ return artifact.GetAllRegistries400JSONResponse{
+ BadRequestJSONResponse: artifact.BadRequestJSONResponse(
+ *GetErrorResponse(http.StatusBadRequest, err.Error()),
+ ),
+ }, nil
+ }
+
+ space, err := c.spaceStore.FindByRef(ctx, regInfo.parentRef)
+ if err != nil {
+ return artifact.GetAllRegistries400JSONResponse{
+ BadRequestJSONResponse: artifact.BadRequestJSONResponse(
+ *GetErrorResponse(http.StatusBadRequest, err.Error()),
+ ),
+ }, nil
+ }
+
+ session, _ := request.AuthSessionFrom(ctx)
+ if err = apiauth.CheckSpaceScope(
+ ctx,
+ c.Authorizer,
+ session,
+ space,
+ enum.ResourceTypeRegistry,
+ enum.PermissionRegistryView,
+ ); err != nil {
+ return artifact.GetAllRegistries403JSONResponse{
+ UnauthorizedJSONResponse: artifact.UnauthorizedJSONResponse(
+ *GetErrorResponse(http.StatusForbidden, err.Error()),
+ ),
+ }, nil
+ }
+
+ var repos *[]store.RegistryMetadata
+ repoType := ""
+ if r.Params.Type != nil {
+ repoType = string(*r.Params.Type)
+ }
+ e := ValidatePackageTypes(regInfo.packageTypes)
+ if e != nil {
+ return nil, e
+ }
+ e = ValidateRepoType(repoType)
+ if e != nil {
+ return nil, e
+ }
+ var count int64
+ repos, err = c.RegistryRepository.GetAll(
+ ctx,
+ regInfo.parentID,
+ regInfo.packageTypes,
+ regInfo.sortByField,
+ regInfo.sortByOrder,
+ regInfo.limit,
+ regInfo.offset,
+ regInfo.searchTerm,
+ repoType,
+ )
+ count, _ = c.RegistryRepository.CountAll(
+ ctx,
+ regInfo.parentID,
+ regInfo.packageTypes,
+ regInfo.searchTerm,
+ repoType,
+ )
+ if err != nil {
+ return artifact.GetAllRegistries500JSONResponse{
+ InternalServerErrorJSONResponse: artifact.InternalServerErrorJSONResponse(
+ *GetErrorResponse(http.StatusInternalServerError, err.Error()),
+ ),
+ }, nil
+ }
+ return artifact.GetAllRegistries200JSONResponse{
+ ListRegistryResponseJSONResponse: *GetAllRegistryResponse(
+ repos, count, regInfo.pageNumber,
+ regInfo.limit, regInfo.rootIdentifier, c.URLProvider.RegistryURL(),
+ ),
+ }, nil
+}
+
+func GetAllRegistryResponse(
+ repos *[]store.RegistryMetadata,
+ count int64,
+ pageNumber int64,
+ pageSize int,
+ rootIdentifier string,
+ registryURL string,
+) *artifact.ListRegistryResponseJSONResponse {
+ repoMetadataList := GetRegistryMetadata(repos, rootIdentifier, registryURL)
+ pageCount := GetPageCount(count, pageSize)
+ listRepository := &artifact.ListRegistry{
+ ItemCount: &count,
+ PageCount: &pageCount,
+ PageIndex: &pageNumber,
+ PageSize: &pageSize,
+ Registries: repoMetadataList,
+ }
+ response := &artifact.ListRegistryResponseJSONResponse{
+ Data: *listRepository,
+ Status: artifact.StatusSUCCESS,
+ }
+ return response
+}
+
+func GetRegistryMetadata(
+ registryMetadatas *[]store.RegistryMetadata,
+ rootIdentifier string,
+ registryURL string,
+) []artifact.RegistryMetadata {
+ repoMetadataList := []artifact.RegistryMetadata{}
+ for _, reg := range *registryMetadatas {
+ modifiedAt := GetTimeInMs(reg.LastModified)
+ var labels *[]string
+ if 
!commons.IsEmpty(reg.Labels) {
+ temp := []string(reg.Labels)
+ labels = &temp
+ }
+ var description string
+ if !commons.IsEmpty(reg.Description) {
+ description = reg.Description
+ }
+ var artifactCount *int64
+ if reg.ArtifactCount != 0 {
+ artifactCount = ptr.Int64(reg.ArtifactCount)
+ }
+ var downloadCount *int64
+ if reg.DownloadCount != 0 {
+ downloadCount = ptr.Int64(reg.DownloadCount)
+ }
+ // fix: refactor it
+ size := GetSize(reg.Size)
+ repoMetadata := artifact.RegistryMetadata{
+ Identifier: reg.RegIdentifier,
+ Description: &description,
+ PackageType: reg.PackageType,
+ Type: reg.Type,
+ LastModified: &modifiedAt,
+ Url: GetRepoURL(rootIdentifier, reg.RegIdentifier, registryURL),
+ ArtifactsCount: artifactCount,
+ DownloadsCount: downloadCount,
+ RegistrySize: &size,
+ Labels: labels,
+ }
+ repoMetadataList = append(repoMetadataList, repoMetadata)
+ }
+ return repoMetadataList
+}
diff --git a/registry/app/api/controller/metadata/get_registry.go b/registry/app/api/controller/metadata/get_registry.go
new file mode 100644
index 000000000..ff23e5c3a
--- /dev/null
+++ b/registry/app/api/controller/metadata/get_registry.go
@@ -0,0 +1,109 @@
+// Copyright 2023 Harness, Inc.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package metadata
+
+import (
+ "context"
+ "net/http"
+
+ apiauth "github.com/harness/gitness/app/api/auth"
+ "github.com/harness/gitness/app/api/request"
+ "github.com/harness/gitness/registry/app/api/openapi/contracts/artifact"
+ "github.com/harness/gitness/types/enum"
+)
+
+func (c *APIController) GetRegistry(
+ ctx context.Context,
+ r artifact.GetRegistryRequestObject,
+) (artifact.GetRegistryResponseObject, error) {
+ regInfo, err := c.GetRegistryRequestBaseInfo(ctx, "", string(r.RegistryRef))
+ if err != nil {
+ return artifact.GetRegistry400JSONResponse{
+ BadRequestJSONResponse: artifact.BadRequestJSONResponse(
+ *GetErrorResponse(http.StatusBadRequest, err.Error()),
+ ),
+ }, nil
+ }
+ space, err := c.spaceStore.FindByRef(ctx, regInfo.parentRef)
+ if err != nil {
+ return artifact.GetRegistry400JSONResponse{
+ BadRequestJSONResponse: artifact.BadRequestJSONResponse(
+ *GetErrorResponse(http.StatusBadRequest, err.Error()),
+ ),
+ }, nil
+ }
+
+ session, _ := request.AuthSessionFrom(ctx)
+ permissionChecks := getPermissionChecks(space, regInfo.RegistryIdentifier, enum.PermissionRegistryView)
+ if err = apiauth.CheckRegistry(
+ ctx,
+ c.Authorizer,
+ session,
+ permissionChecks...,
+ ); err != nil {
+ return artifact.GetRegistry403JSONResponse{
+ UnauthorizedJSONResponse: artifact.UnauthorizedJSONResponse(
+ *GetErrorResponse(http.StatusForbidden, err.Error()),
+ ),
+ }, nil
+ }
+ // Surface the lookup failure before using the entity; a missing registry is
+ // then reported via the empty-name check that follows.
+ repoEntity, err := c.RegistryRepository.GetByParentIDAndName(ctx, regInfo.parentID, regInfo.RegistryIdentifier)
+ if err != nil {
+ return throwGetRegistry500Error(err), nil
+ }
+ if len(repoEntity.Name) == 0 {
+ return 
artifact.GetRegistry404JSONResponse{
+ NotFoundJSONResponse: artifact.NotFoundJSONResponse(
+ *GetErrorResponse(http.StatusNotFound, "registry doesn't exist with this key"),
+ ),
+ }, nil
+ }
+ if string(repoEntity.Type) == string(artifact.RegistryTypeVIRTUAL) {
+ cleanupPolicies, err := c.CleanupPolicyStore.GetByRegistryID(ctx, repoEntity.ID)
+ if err != nil {
+ return throwGetRegistry500Error(err), nil
+ }
+ return artifact.GetRegistry200JSONResponse{
+ RegistryResponseJSONResponse: *CreateVirtualRepositoryResponse(
+ repoEntity, c.getUpstreamProxyKeys(
+ ctx,
+ repoEntity.UpstreamProxies,
+ ), cleanupPolicies, regInfo.rootIdentifier, c.URLProvider.RegistryURL(),
+ ),
+ }, nil
+ }
+ upstreamproxyEntity, err := c.UpstreamProxyStore.GetByRegistryIdentifier(
+ ctx,
+ regInfo.parentID, regInfo.RegistryIdentifier,
+ )
+ // Check the lookup error before reading RepoKey from the entity.
+ if err != nil {
+ return throwGetRegistry500Error(err), nil
+ }
+ if len(upstreamproxyEntity.RepoKey) == 0 {
+ return artifact.GetRegistry404JSONResponse{
+ NotFoundJSONResponse: artifact.NotFoundJSONResponse(
+ *GetErrorResponse(http.StatusNotFound, "registry doesn't exist with this key"),
+ ),
+ }, nil
+ }
+ return artifact.GetRegistry200JSONResponse{
+ RegistryResponseJSONResponse: *CreateUpstreamProxyResponseJSONResponse(upstreamproxyEntity),
+ }, nil
+}
+
+func throwGetRegistry500Error(err error) artifact.GetRegistry500JSONResponse {
+ return artifact.GetRegistry500JSONResponse{
+ InternalServerErrorJSONResponse: artifact.InternalServerErrorJSONResponse(
+ *GetErrorResponse(http.StatusInternalServerError, err.Error()),
+ ),
+ }
+}
diff --git a/registry/app/api/controller/metadata/update_artifact.go b/registry/app/api/controller/metadata/update_artifact.go
new file mode 100644
index 000000000..8252a43fa
--- /dev/null
+++ b/registry/app/api/controller/metadata/update_artifact.go
@@ -0,0 +1,138 @@
+// Copyright 2023 Harness, Inc.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License. 
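+// UpdateArtifactLabels (below) replaces an artifact's label set and responds
+// with a summary rebuilt from the latest tag metadata. The request body only
+// carries the labels; an illustrative payload (field name assumed from the
+// generated contract) would be:
+//
+// { "labels": ["prod", "scanned"] }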
+ +package metadata + +import ( + "context" + "net/http" + + apiauth "github.com/harness/gitness/app/api/auth" + "github.com/harness/gitness/app/api/request" + "github.com/harness/gitness/registry/app/api/openapi/contracts/artifact" + "github.com/harness/gitness/registry/types" + "github.com/harness/gitness/types/enum" +) + +func (c *APIController) UpdateArtifactLabels( + ctx context.Context, + r artifact.UpdateArtifactLabelsRequestObject, +) (artifact.UpdateArtifactLabelsResponseObject, error) { + regInfo, err := c.GetRegistryRequestInfo( + ctx, nil, nil, nil, nil, + ArtifactVersionResource, "", string(r.RegistryRef), nil, nil, nil, + ) + if err != nil { + return throwModifyArtifact400Error(err), nil + } + + space, err := c.spaceStore.FindByRef(ctx, regInfo.parentRef) + if err != nil { + return artifact.UpdateArtifactLabels400JSONResponse{ + BadRequestJSONResponse: artifact.BadRequestJSONResponse( + *GetErrorResponse(http.StatusBadRequest, err.Error()), + ), + }, nil + } + + session, _ := request.AuthSessionFrom(ctx) + permissionChecks := getPermissionChecks(space, regInfo.RegistryIdentifier, enum.PermissionRegistryEdit) + if err = apiauth.CheckRegistry( + ctx, + c.Authorizer, + session, + permissionChecks..., + ); err != nil { + return artifact.UpdateArtifactLabels403JSONResponse{ + UnauthorizedJSONResponse: artifact.UnauthorizedJSONResponse( + *GetErrorResponse(http.StatusForbidden, err.Error()), + ), + }, nil + } + + a := string(r.Artifact) + + artifactEntity, err := c.ArtifactStore.GetByRepoAndName(ctx, regInfo.parentID, regInfo.RegistryIdentifier, a) + + if artifactEntity == nil || len(artifactEntity.Name) == 0 { + return artifact.UpdateArtifactLabels404JSONResponse{ + NotFoundJSONResponse: artifact.NotFoundJSONResponse( + *GetErrorResponse(http.StatusNotFound, "artifact doesn't exist with this name"), + ), + }, nil + } + if err != nil { + return throwModifyArtifact400Error(err), nil + } + existingArtifact, err := AttachLabels(artifact.ArtifactLabelRequest(*r.Body), artifactEntity) + if err != nil { + return throwModifyArtifact400Error(err), nil + } + + err = c.ArtifactStore.Update(ctx, existingArtifact) + + if err != nil { + return throwModifyArtifact400Error(err), nil + } + + tag, err := c.TagStore.GetLatestTagMetadata(ctx, regInfo.parentID, regInfo.RegistryIdentifier, a) + + if err != nil { + return artifact.UpdateArtifactLabels500JSONResponse{ + InternalServerErrorJSONResponse: artifact.InternalServerErrorJSONResponse( + *GetErrorResponse(http.StatusInternalServerError, err.Error()), + ), + }, nil + } + return artifact.UpdateArtifactLabels200JSONResponse{ + ArtifactLabelResponseJSONResponse: *getArtifactSummary(*tag), + }, nil +} + +func throwModifyArtifact400Error(err error) artifact.UpdateArtifactLabels400JSONResponse { + return artifact.UpdateArtifactLabels400JSONResponse{ + BadRequestJSONResponse: artifact.BadRequestJSONResponse( + *GetErrorResponse(http.StatusBadRequest, err.Error()), + ), + } +} + +func AttachLabels( + dto artifact.ArtifactLabelRequest, + existingArtifact *types.Artifact, +) (*types.Artifact, error) { + return &types.Artifact{ + ID: existingArtifact.ID, + RegistryID: existingArtifact.RegistryID, + Name: existingArtifact.Name, + Labels: dto.Labels, + CreatedAt: existingArtifact.CreatedAt, + }, nil +} + +func getArtifactSummary(t types.ArtifactMetadata) *artifact.ArtifactLabelResponseJSONResponse { + downloads := int64(0) + createdAt := GetTimeInMs(t.CreatedAt) + modifiedAt := GetTimeInMs(t.ModifiedAt) + artifactVersionSummary := &artifact.ArtifactSummary{ + CreatedAt: &createdAt, + ModifiedAt: &modifiedAt, + DownloadsCount: &downloads, + ImageName: 
t.Name, + Labels: &t.Labels, + PackageType: t.PackageType, + } + response := &artifact.ArtifactLabelResponseJSONResponse{ + Data: *artifactVersionSummary, + Status: artifact.StatusSUCCESS, + } + return response +} diff --git a/registry/app/api/controller/metadata/update_registry.go b/registry/app/api/controller/metadata/update_registry.go new file mode 100644 index 000000000..1c2afaf68 --- /dev/null +++ b/registry/app/api/controller/metadata/update_registry.go @@ -0,0 +1,398 @@ +// Copyright 2023 Harness, Inc. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package metadata + +import ( + "context" + "fmt" + "net/http" + + apiauth "github.com/harness/gitness/app/api/auth" + "github.com/harness/gitness/app/api/request" + "github.com/harness/gitness/app/auth" + "github.com/harness/gitness/audit" + "github.com/harness/gitness/registry/app/api/openapi/contracts/artifact" + "github.com/harness/gitness/registry/types" + types2 "github.com/harness/gitness/types" + gitnessenum "github.com/harness/gitness/types/enum" + + "github.com/rs/zerolog/log" +) + +func (c *APIController) ModifyRegistry( + ctx context.Context, + r artifact.ModifyRegistryRequestObject, +) (artifact.ModifyRegistryResponseObject, error) { + regInfo, err := c.GetRegistryRequestBaseInfo(ctx, "", string(r.RegistryRef)) + if err != nil { + return artifact.ModifyRegistry400JSONResponse{ + BadRequestJSONResponse: artifact.BadRequestJSONResponse( + *GetErrorResponse(http.StatusBadRequest, err.Error()), + ), + }, err + } + space, err := c.spaceStore.FindByRef(ctx, regInfo.parentRef) + if err != nil { + return artifact.ModifyRegistry400JSONResponse{ + BadRequestJSONResponse: artifact.BadRequestJSONResponse( + *GetErrorResponse(http.StatusBadRequest, err.Error()), + ), + }, err + } + + session, _ := request.AuthSessionFrom(ctx) + permissionChecks := getPermissionChecks(space, regInfo.RegistryIdentifier, gitnessenum.PermissionRegistryEdit) + if err = apiauth.CheckRegistry( + ctx, + c.Authorizer, + session, + permissionChecks..., + ); err != nil { + return artifact.ModifyRegistry403JSONResponse{ + UnauthorizedJSONResponse: artifact.UnauthorizedJSONResponse( + *GetErrorResponse(http.StatusForbidden, err.Error()), + ), + }, err + } + + repoEntity, err := c.RegistryRepository.GetByParentIDAndName(ctx, regInfo.parentID, regInfo.RegistryIdentifier) + if err != nil { + return throwModifyRegistry500Error(err), err + } + + if string(repoEntity.Type) == string(artifact.RegistryTypeVIRTUAL) { + return c.updateVirtualRegistry(ctx, r, repoEntity, err, regInfo, session) + } + upstreamproxyEntity, err := c.UpstreamProxyStore.GetByRegistryIdentifier( + ctx, regInfo.parentID, + regInfo.RegistryIdentifier, + ) + if upstreamproxyEntity == nil || len(upstreamproxyEntity.RepoKey) == 0 { + return artifact.ModifyRegistry404JSONResponse{ + NotFoundJSONResponse: artifact.NotFoundJSONResponse( + *GetErrorResponse(http.StatusNotFound, "registry doesn't exist with this key"), + ), + }, nil + } + if err != nil { + return throwModifyRegistry500Error(err), err + } + registry, 
upstreamproxy, err := UpdateUpstreamProxyEntity( + artifact.RegistryRequest(*r.Body), + regInfo.parentID, regInfo.rootIdentifierID, upstreamproxyEntity, + ) + if err != nil { + return throwModifyRegistry500Error(err), err + } + registry.ID = repoEntity.ID + upstreamproxy.ID = upstreamproxyEntity.ID + upstreamproxy.RegistryID = repoEntity.ID + err = c.tx.WithTx( + ctx, func(ctx context.Context) error { + err = c.updateRegistryWithAudit(ctx, repoEntity, registry, session.Principal, regInfo.parentRef) + + if err != nil { + return fmt.Errorf("failed to update registry: %w", err) + } + + err = c.updateUpstreamProxyWithAudit( + ctx, upstreamproxy, session.Principal, regInfo.parentRef, registry.Name, + ) + + if err != nil { + return fmt.Errorf("failed to update upstream proxy: %w", err) + } + return nil + }, + ) + if err != nil { + return throwModifyRegistry500Error(err), err + } + modifiedRepoEntity, err := c.UpstreamProxyStore.Get(ctx, upstreamproxyEntity.RegistryID) + if err != nil { + return throwModifyRegistry500Error(err), err + } + return artifact.ModifyRegistry200JSONResponse{ + RegistryResponseJSONResponse: *CreateUpstreamProxyResponseJSONResponse(modifiedRepoEntity), + }, nil +} + +func (c *APIController) updateVirtualRegistry( + ctx context.Context, r artifact.ModifyRegistryRequestObject, repoEntity *types.Registry, err error, + regInfo *RegistryRequestBaseInfo, session *auth.Session, +) (artifact.ModifyRegistryResponseObject, error) { + if len(repoEntity.Name) == 0 { + return artifact.ModifyRegistry404JSONResponse{ + NotFoundJSONResponse: artifact.NotFoundJSONResponse( + *GetErrorResponse(http.StatusNotFound, "registry doesn't exist with this key"), + ), + }, nil + } + if err != nil { + return throwModifyRegistry500Error(err), err + } + registry, err := UpdateRepoEntity( + artifact.RegistryRequest(*r.Body), + repoEntity.ParentID, + repoEntity.RootParentID, + repoEntity, + ) + if err != nil { + return artifact.ModifyRegistry400JSONResponse{ + BadRequestJSONResponse: artifact.BadRequestJSONResponse( + *GetErrorResponse(http.StatusBadRequest, err.Error()), + ), + }, nil + } + err = c.setUpstreamProxyIDs(ctx, registry, artifact.RegistryRequest(*r.Body), regInfo.parentID) + if err != nil { + return throwModifyRegistry500Error(err), nil + } + err = c.updateRegistryWithAudit(ctx, repoEntity, registry, session.Principal, regInfo.parentRef) + + if err != nil { + return throwModifyRegistry500Error(err), nil + } + err = c.updateCleanupPolicy(ctx, r.Body, registry.ID) + if err != nil { + return throwModifyRegistry500Error(err), nil + } + modifiedRepoEntity, err := c.RegistryRepository.Get(ctx, registry.ID) + if err != nil { + return throwModifyRegistry500Error(err), nil + } + cleanupPolicies, err := c.CleanupPolicyStore.GetByRegistryID(ctx, repoEntity.ID) + if err != nil { + return throwModifyRegistry500Error(err), nil + } + return artifact.ModifyRegistry200JSONResponse{ + RegistryResponseJSONResponse: *CreateVirtualRepositoryResponse( + modifiedRepoEntity, + c.getUpstreamProxyKeys(ctx, modifiedRepoEntity.UpstreamProxies), cleanupPolicies, + regInfo.rootIdentifier, c.URLProvider.RegistryURL(), + ), + }, nil +} + +func (c *APIController) updateUpstreamProxyWithAudit( + ctx context.Context, upstreamProxy *types.UpstreamProxyConfig, + principal types2.Principal, parentRef string, registryName string, +) error { + existingUpstreamProxy, err := c.UpstreamProxyStore.Get(ctx, upstreamProxy.RegistryID) + if err != nil { + log.Ctx(ctx).Warn().Msgf( + "failed to find upstream proxy config for: %d", 
+ upstreamProxy.RegistryID, + ) + } + + err = c.UpstreamProxyStore.Update(ctx, upstreamProxy) + if err != nil { + return err + } + if existingUpstreamProxy != nil { + auditErr := c.AuditService.Log( + ctx, + principal, + audit.NewResource(audit.ResourceTypeRegistryUpstreamProxy, registryName), + audit.ActionUpdated, + parentRef, + audit.WithOldObject( + audit.RegistryUpstreamProxyConfigObject{ + ID: existingUpstreamProxy.ID, + RegistryID: existingUpstreamProxy.RegistryID, + Source: existingUpstreamProxy.Source, + URL: existingUpstreamProxy.RepoURL, + AuthType: existingUpstreamProxy.RepoAuthType, + CreatedAt: existingUpstreamProxy.CreatedAt, + UpdatedAt: existingUpstreamProxy.UpdatedAt, + CreatedBy: existingUpstreamProxy.CreatedBy, + UpdatedBy: existingUpstreamProxy.UpdatedBy, + }, + ), + audit.WithNewObject( + audit.RegistryUpstreamProxyConfigObject{ + ID: upstreamProxy.ID, + RegistryID: upstreamProxy.RegistryID, + Source: upstreamProxy.Source, + URL: upstreamProxy.URL, + AuthType: upstreamProxy.AuthType, + CreatedAt: upstreamProxy.CreatedAt, + UpdatedAt: upstreamProxy.UpdatedAt, + CreatedBy: upstreamProxy.CreatedBy, + UpdatedBy: upstreamProxy.UpdatedBy, + }, + ), + ) + if auditErr != nil { + log.Ctx(ctx).Warn().Msgf( + "failed to insert audit log for update upstream proxy "+ + "config operation: %s", auditErr, + ) + } + } + return err +} + +func (c *APIController) updateRegistryWithAudit( + ctx context.Context, oldRegistry *types.Registry, + newRegistry *types.Registry, principal types2.Principal, parentRef string, +) error { + err := c.RegistryRepository.Update(ctx, newRegistry) + if err != nil { + return err + } + auditErr := c.AuditService.Log( + ctx, + principal, + audit.NewResource(audit.ResourceTypeRegistry, newRegistry.Name), + audit.ActionUpdated, + parentRef, + audit.WithOldObject(oldRegistry), + audit.WithNewObject(newRegistry), + ) + if auditErr != nil { + log.Ctx(ctx).Warn().Msgf("failed to insert audit log for update registry operation: %s", auditErr) + } + + return err +} + +func throwModifyRegistry500Error(err error) artifact.ModifyRegistry500JSONResponse { + return artifact.ModifyRegistry500JSONResponse{ + InternalServerErrorJSONResponse: artifact.InternalServerErrorJSONResponse( + *GetErrorResponse(http.StatusInternalServerError, err.Error()), + ), + } +} + +func (c *APIController) updateCleanupPolicy( + ctx context.Context, config *artifact.ModifyRegistryJSONRequestBody, registryID int64, +) error { + existingCleanupPolicies, err := c.CleanupPolicyStore.GetIDsByRegistryID(ctx, registryID) + if err != nil { + return err + } + currentCleanupPolicyEntities := CreateCleanupPolicyEntity(config, registryID) + + err = c.CleanupPolicyStore.ModifyCleanupPolicies(ctx, currentCleanupPolicyEntities, existingCleanupPolicies) + + return err +} + +func UpdateRepoEntity( + dto artifact.RegistryRequest, + parentID int64, + rootParentID int64, + existingRepo *types.Registry, +) (*types.Registry, error) { + allowedPattern, blockedPattern, description, labels := getRepoEntityFields(dto) + e := ValidatePackageTypeChange(string(existingRepo.PackageType), string(dto.PackageType)) + if e != nil { + return nil, e + } + e = ValidateRepoTypeChange(string(existingRepo.Type), string(dto.Config.Type)) + if e != nil { + return nil, e + } + e = ValidateIdentifierChange(existingRepo.Name, dto.Identifier) + if e != nil { + return nil, e + } + entity := &types.Registry{ + Name: dto.Identifier, + ID: existingRepo.ID, + ParentID: parentID, + RootParentID: rootParentID, + Description: description, + 
AllowedPattern: allowedPattern, + BlockedPattern: blockedPattern, + PackageType: existingRepo.PackageType, + Type: existingRepo.Type, + Labels: labels, + CreatedAt: existingRepo.CreatedAt, + } + return entity, nil +} + +func UpdateUpstreamProxyEntity( + dto artifact.RegistryRequest, + parentID int64, + rootParentID int64, + u *types.UpstreamProxy, +) (*types.Registry, *types.UpstreamProxyConfig, error) { + allowedPattern := []string{} + if dto.AllowedPattern != nil { + allowedPattern = *dto.AllowedPattern + } + blockedPattern := []string{} + if dto.BlockedPattern != nil { + blockedPattern = *dto.BlockedPattern + } + e := ValidatePackageTypeChange(string(u.PackageType), string(dto.PackageType)) + if e != nil { + return nil, nil, e + } + e = ValidateIdentifierChange(u.RepoKey, dto.Identifier) + if e != nil { + return nil, nil, e + } + repoEntity := &types.Registry{ + ID: u.RegistryID, + Name: dto.Identifier, + ParentID: parentID, + RootParentID: rootParentID, + AllowedPattern: allowedPattern, + BlockedPattern: blockedPattern, + PackageType: dto.PackageType, + Type: artifact.RegistryTypeUPSTREAM, + CreatedAt: u.CreatedAt, + } + config, err := dto.Config.AsUpstreamConfig() + if err != nil { + return nil, nil, err + } + CleanURLPath(config.Url) + repoURL := "" + if config.Url != nil { + repoURL = *config.Url + } + upstreamProxyConfigEntity := &types.UpstreamProxyConfig{ + URL: repoURL, + AuthType: string(config.AuthType), + RegistryID: u.RegistryID, + CreatedAt: u.CreatedAt, + } + if config.Source != nil && len(string(*config.Source)) > 0 { + err := ValidateUpstreamSource(string(*config.Source)) + if err != nil { + return nil, nil, err + } + upstreamProxyConfigEntity.Source = string(*config.Source) + } + if config.Source != nil && string(artifact.UpstreamConfigSourceDockerhub) == string(*config.Source) { + upstreamProxyConfigEntity.URL = "" + } + if u.ID != -1 { + upstreamProxyConfigEntity.ID = u.ID + } + if config.AuthType == artifact.AuthTypeUserPassword { + res, err := config.Auth.AsUserPassword() + if err != nil { + return nil, nil, err + } + upstreamProxyConfigEntity.UserName = res.UserName + upstreamProxyConfigEntity.SecretIdentifier = *res.SecretIdentifier + upstreamProxyConfigEntity.SecretSpaceID = *res.SecretSpaceId + } else { + upstreamProxyConfigEntity.UserName = "" + upstreamProxyConfigEntity.SecretIdentifier = "" + upstreamProxyConfigEntity.SecretSpaceID = 0 + } + return repoEntity, upstreamProxyConfigEntity, nil +} diff --git a/registry/app/api/controller/metadata/utils.go b/registry/app/api/controller/metadata/utils.go new file mode 100644 index 000000000..2f8d998b0 --- /dev/null +++ b/registry/app/api/controller/metadata/utils.go @@ -0,0 +1,408 @@ +// Copyright 2023 Harness, Inc. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License.
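[Reviewer note — illustrative sketch, not part of the patch: the Validate*Change helpers defined in utils.go below return nil only when the stored value and the requested value are both non-empty and equal; that is how UpdateRepoEntity and UpdateUpstreamProxyEntity keep identifier, package type, and registry type immutable on update. A minimal sketch of the contract:]

// nil: value unchanged.
_ = ValidatePackageTypeChange("DOCKER", "DOCKER")
// error "package type change is not allowed": value differs.
err := ValidatePackageTypeChange("DOCKER", "HELM")
// also an error: empty values are rejected, not treated as "no change".
err = ValidatePackageTypeChange("", "DOCKER")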
+ +package metadata + +import ( + "errors" + "fmt" + "math" + "net/url" + "path" + "regexp" + "strconv" + "strings" + "time" + + api "github.com/harness/gitness/registry/app/api/openapi/contracts/artifact" + "github.com/harness/gitness/registry/app/pkg/commons" + "github.com/harness/gitness/types" + "github.com/harness/gitness/types/enum" + + "github.com/dustin/go-humanize" + "github.com/rs/zerolog/log" +) + +var registrySort = []string{ + "identifier", + "lastModified", + "registrySize", + "artifactsCount", + "downloadsCount", +} + +const ( + RepositoryResource = "repository" + ArtifactResource = "artifact" + ArtifactVersionResource = "artifactversion" + RegistryIdentifierErrorMsg = "registry name must be 1-255 characters long, may contain lower case characters, numbers " + + "and ._-, and must start with a number or character" + RegexIdentifierPattern = "^[a-z0-9]+(?:[._-][a-z0-9]+)*$" +) + +var RegistrySortMap = map[string]string{ + "identifier": "name", + "lastModified": "updated_at", + "registrySize": "size", + "artifactsCount": "artifact_count", + "downloadsCount": "download_count", + "createdAt": "created_at", +} + +var artifactSort = []string{ + "repoKey", + "name", + "lastModified", + "downloadsCount", +} + +var artifactSortMap = map[string]string{ + "repoKey": "name", + "lastModified": "updated_at", + "name": "image_name", + "downloadsCount": "image_name", + "createdAt": "created_at", +} + +var artifactVersionSort = []string{ + "name", + "size", + "pullCommand", + "downloadsCount", + "lastModified", +} + +var artifactVersionSortMap = map[string]string{ + "name": "name", + "size": "name", + "pullCommand": "name", + "downloadsCount": "name", + "lastModified": "updated_at", + "createdAt": "created_at", +} + +var validRepositoryTypes = []string{ + string(api.RegistryTypeUPSTREAM), + string(api.RegistryTypeVIRTUAL), +} + +var validPackageTypes = []string{ + string(api.PackageTypeDOCKER), + string(api.PackageTypeHELM), + string(api.PackageTypeMAVEN), +} + +var validUpstreamSources = []string{ + string(api.UpstreamConfigSourceCustom), + string(api.UpstreamConfigSourceDockerhub), +} + +func ValidatePackageTypes(packageTypes []string) error { + if commons.IsEmpty(packageTypes) || IsPackageTypesValid(packageTypes) { + return nil + } + return errors.New("invalid package type") +} + +func ValidatePackageType(packageType string) error { + if len(packageType) == 0 || IsPackageTypeValid(packageType) { + return nil + } + return errors.New("invalid package type") +} + +func ValidatePackageTypeChange(fromDB, newPackage string) error { + if len(fromDB) > 0 && len(newPackage) > 0 && fromDB == newPackage { + return nil + } + return errors.New("package type change is not allowed") +} + +func ValidateRepoTypeChange(fromDB, newRepo string) error { + if len(fromDB) > 0 && len(newRepo) > 0 && fromDB == newRepo { + return nil + } + return errors.New("registry type change is not allowed") +} + +func ValidateIdentifierChange(fromDB, newIdentifier string) error { + if len(fromDB) > 0 && len(newIdentifier) > 0 && fromDB == newIdentifier { + return nil + } + return errors.New("registry identifier change is not allowed") +} + +func ValidateIdentifier(identifier string) error { + if len(identifier) == 0 { + return errors.New(RegistryIdentifierErrorMsg) + } + + matched, err := regexp.MatchString(RegexIdentifierPattern, identifier) + if err != nil || !matched { + return errors.New(RegistryIdentifierErrorMsg) + } + return nil +} + +func ValidateUpstream(config *api.RegistryConfig) error { + if 
!commons.IsEmpty(config.Type) && config.Type == api.RegistryTypeUPSTREAM { + upstreamConfig, err := config.AsUpstreamConfig() + if err != nil { + return err + } + if commons.IsEmpty(upstreamConfig.Url) { + return errors.New("URL is required for upstream repository") + } + } + return nil +} + +func ValidateRepoType(repoType string) error { + if len(repoType) == 0 || IsRepoTypeValid(repoType) { + return nil + } + return errors.New("invalid repository type") +} + +func ValidateUpstreamSource(source string) error { + if len(source) == 0 || IsUpstreamSourceValid(source) { + return nil + } + return errors.New("invalid upstream proxy source") +} + +func IsRepoTypeValid(repoType string) bool { + for _, item := range validRepositoryTypes { + if item == repoType { + return true + } + } + return false +} + +func IsUpstreamSourceValid(source string) bool { + for _, item := range validUpstreamSources { + if item == source { + return true + } + } + return false +} + +func IsPackageTypeValid(packageType string) bool { + for _, item := range validPackageTypes { + if item == packageType { + return true + } + } + return false +} + +func IsPackageTypesValid(packageTypes []string) bool { + for _, item := range packageTypes { + if !IsPackageTypeValid(item) { + return false + } + } + return true +} + +func GetTimeInMs(t time.Time) string { + return fmt.Sprint(t.UnixMilli()) +} + +func GetErrorResponse(code int, message string) *api.Error { + return &api.Error{ + Code: fmt.Sprint(code), + Message: message, + } +} + +func GetSortByOrder(sortOrder string) string { + defaultSortOrder := "ASC" + decreasingSortOrder := "DESC" + if len(sortOrder) == 0 { + return defaultSortOrder + } + if sortOrder == decreasingSortOrder { + return decreasingSortOrder + } + return defaultSortOrder +} + +func sortKey(slice []string, target string) string { + for _, item := range slice { + if item == target { + return item + } + } + return "createdAt" +} + +func GetSortByField(sortByField string, resource string) string { + switch resource { + case RepositoryResource: + sortkey := sortKey(registrySort, sortByField) + return RegistrySortMap[sortkey] + case ArtifactResource: + sortkey := sortKey(artifactSort, sortByField) + return artifactSortMap[sortkey] + case ArtifactVersionResource: + sortkey := sortKey(artifactVersionSort, sortByField) + return artifactVersionSortMap[sortkey] + } + return "created_at" +} + +func GetPageLimit(pageSize *api.PageSize) int { + defaultPageSize := 10 + if pageSize != nil { + return int(*pageSize) + } + return defaultPageSize +} + +func GetOffset(pageSize *api.PageSize, pageNumber *api.PageNumber) int { + defaultOffset := 0 + if pageSize == nil || pageNumber == nil { + return defaultOffset + } + if *pageNumber == 0 { + return 0 + } + return (int(*pageSize)) * int(*pageNumber) +} + +func GetPageNumber(pageNumber *api.PageNumber) int64 { + defaultPageNumber := int64(1) + if pageNumber == nil { + return defaultPageNumber + } + return int64(*pageNumber) +} + +func GetSuccessResponse() *api.Success { + return &api.Success{ + Status: api.StatusSUCCESS, + } +} + +func GetPageCount(count int64, pageSize int) int64 { + return int64(math.Ceil(float64(count) / float64(pageSize))) +} + +func GetImageSize(size string) string { + sizeVal, _ := strconv.ParseInt(size, 10, 64) + return GetSize(sizeVal) +} + +func GetSize(sizeVal int64) string { + humanReadable := humanize.Bytes(uint64(sizeVal)) + return humanReadable +} + +func GetRegRef(parentRef string, regIdentifier string) (string, error) { + result := "" + if 
commons.IsEmpty(parentRef) || commons.IsEmpty(regIdentifier) { + return result, errors.New("parentRef or regIdentifier is empty") + } + return parentRef + "/" + regIdentifier, nil +} + +func GetRepoURL(rootIdentifier, registry string, registryURL string) string { + parsedURL, err := url.Parse(registryURL) + if err != nil { + log.Error().Err(err).Msgf("Error parsing URL: %s", registryURL) + return "" + } + parsedURL.Path = path.Join(parsedURL.Path, strings.ToLower(rootIdentifier), registry) + return parsedURL.String() +} + +func GetRepoURLWithoutProtocol(rootIdentifier string, registry string, registryURL string) string { + repoURL := GetRepoURL(rootIdentifier, registry, registryURL) + parsedURL, err := url.Parse(repoURL) + if err != nil { + log.Error().Stack().Err(err).Msg("Error parsing URL: ") + return "" + } + + return parsedURL.Host + parsedURL.Path +} + +func GetTagURL(rootIdentifier string, artifact string, version string, registry string, registryURL string) string { + url := GetRepoURL(rootIdentifier, registry, registryURL) + url += "/" + artifact + "/" + url += version + return url +} + +func GetPullCommand( + rootIdentifier string, registry string, image string, tag string, + packageType string, registryURL string, +) string { + if packageType == "DOCKER" { + return GetDockerPullCommand(rootIdentifier, registry, image, tag, registryURL) + } else if packageType == "HELM" { + return GetHelmPullCommand(rootIdentifier, registry, image, tag, registryURL) + } + return "" +} + +func GetDockerPullCommand( + rootIdentifier string, registry string, image string, + tag string, registryURL string, +) string { + return "docker pull " + GetRepoURLWithoutProtocol(rootIdentifier, registry, registryURL) + "/" + image + ":" + tag +} + +func GetHelmPullCommand(rootIdentifier string, registry string, image string, tag string, registryURL string) string { + return "helm install " + GetRepoURLWithoutProtocol(rootIdentifier, registry, registryURL) + "/" + image + ":" + tag +} + +// CleanURLPath removes leading and trailing spaces and trailing slashes from the given URL string. +func CleanURLPath(input *string) { + if input == nil { + return + } + // Parse the input to URL + u, err := url.Parse(*input) + if err != nil { + return + } + + // Clean the path by removing trailing slashes and spaces + cleanedPath := strings.TrimRight(strings.TrimSpace(u.Path), "/") + + // Update the URL path in the original input string + u.Path = cleanedPath + + // Update the input string with the cleaned URL string representation + *input = u.String() +} + +func getPermissionChecks( + space *types.Space, + registryIdentifier string, + permission enum.Permission, +) []types.PermissionCheck { + var permissionChecks []types.PermissionCheck + permissionCheck := &types.PermissionCheck{ + Scope: types.Scope{SpacePath: space.Identifier}, + Resource: types.Resource{Type: enum.ResourceTypeRegistry, Identifier: registryIdentifier}, + Permission: permission, + } + permissionChecks = append(permissionChecks, *permissionCheck) + return permissionChecks +} diff --git a/registry/app/api/handler/oci/artifactfilter.go b/registry/app/api/handler/oci/artifactfilter.go new file mode 100644 index 000000000..0afcfd680 --- /dev/null +++ b/registry/app/api/handler/oci/artifactfilter.go @@ -0,0 +1,90 @@ +// Copyright 2023 Harness, Inc. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. 
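[Reviewer note — illustrative worked example, not part of the patch: output of the URL helpers above, assuming a registry URL of "https://registry.example.com" and a root space "Acme" (all values hypothetical). Note that only the root identifier is lower-cased.]

// GetRepoURL("Acme", "docker-repo", "https://registry.example.com")
//   → "https://registry.example.com/acme/docker-repo"
// GetRepoURLWithoutProtocol("Acme", "docker-repo", "https://registry.example.com")
//   → "registry.example.com/acme/docker-repo"
// GetDockerPullCommand("Acme", "docker-repo", "alpine", "3.19", "https://registry.example.com")
//   → "docker pull registry.example.com/acme/docker-repo/alpine:3.19"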
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package oci + +import ( + "errors" + "fmt" + "regexp" + + "github.com/lib/pq" + "github.com/rs/zerolog/log" +) + +func MatchArtifactFilter( + allowedPattern pq.StringArray, + blockedPattern pq.StringArray, artifact string, +) (bool, error) { + allowedPatterns := []string(allowedPattern) + blockedPatterns := []string(blockedPattern) + + if len(blockedPatterns) > 0 { + flag, err := matchPatterns(blockedPatterns, artifact) + if err != nil { + return flag, fmt.Errorf( + "failed to match blocked patterns for artifact %s: %w", + artifact, err, + ) + } + if flag { + return false, errors.New( + "failed because artifact seems to be matching blocked patterns configured on repository", + ) + } + } + + if len(allowedPatterns) > 0 { + flag, err := matchPatterns(allowedPatterns, artifact) + if err != nil { + return flag, fmt.Errorf( + "failed to match allowed patterns for artifact %s: %w", + artifact, err, + ) + } + + if !flag { + return false, errors.New( + "failed because artifact doesn't seems to be matching allowed patterns configured on repository", + ) + } + } + return true, nil +} + +func matchPatterns( + patterns []string, + val string, +) (bool, error) { + for _, pattern := range patterns { + flag, err := regexp.MatchString(pattern, val) + if err != nil { + log.Error().Err(err).Msgf( + "failed to match pattern %s for val %s", + pattern, + val, + ) + return flag, fmt.Errorf( + "failed to match pattern %s for val %s: %w", + pattern, + val, + err, + ) + } + if flag { + return true, nil + } + } + return false, nil +} diff --git a/registry/app/api/handler/oci/base.go b/registry/app/api/handler/oci/base.go new file mode 100644 index 000000000..db0d7bbfd --- /dev/null +++ b/registry/app/api/handler/oci/base.go @@ -0,0 +1,249 @@ +// Copyright 2023 Harness, Inc. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
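[Reviewer note — illustrative sketch, not part of the patch: MatchArtifactFilter above treats both lists as regular expressions, and blocked patterns take precedence over allowed ones. Pattern and artifact values below are hypothetical; callers pass "image:tag" strings.]

ok, err := MatchArtifactFilter(pq.StringArray{"^alpine:.*"}, nil, "alpine:3.19")
// ok == true, err == nil: the artifact matches an allowed pattern.
ok, err = MatchArtifactFilter(pq.StringArray{"^alpine:.*"}, pq.StringArray{":latest$"}, "alpine:latest")
// ok == false, err != nil: blocked patterns are evaluated first and win.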
+ +package oci + +import ( + "context" + "net/http" + "net/url" + "strings" + + usercontroller "github.com/harness/gitness/app/api/controller/user" + "github.com/harness/gitness/app/auth/authn" + "github.com/harness/gitness/app/auth/authz" + corestore "github.com/harness/gitness/app/store" + urlprovider "github.com/harness/gitness/app/url" + "github.com/harness/gitness/registry/app/api/controller/metadata" + "github.com/harness/gitness/registry/app/api/openapi/contracts/artifact" + "github.com/harness/gitness/registry/app/dist_temp/dcontext" + "github.com/harness/gitness/registry/app/dist_temp/errcode" + "github.com/harness/gitness/registry/app/pkg" + "github.com/harness/gitness/registry/app/pkg/commons" + "github.com/harness/gitness/registry/app/pkg/docker" + + v2 "github.com/distribution/distribution/v3/registry/api/v2" + "github.com/opencontainers/go-digest" + "github.com/rs/zerolog/log" +) + +func NewHandler( + controller *docker.Controller, spaceStore corestore.SpaceStore, tokenStore corestore.TokenStore, + userCtrl *usercontroller.Controller, authenticator authn.Authenticator, urlProvider urlprovider.Provider, + authorizer authz.Authorizer, +) *Handler { + return &Handler{ + Controller: controller, + SpaceStore: spaceStore, + TokenStore: tokenStore, + UserCtrl: userCtrl, + Authenticator: authenticator, + URLProvider: urlProvider, + Authorizer: authorizer, + } +} + +type Handler struct { + Controller *docker.Controller + SpaceStore corestore.SpaceStore + TokenStore corestore.TokenStore + UserCtrl *usercontroller.Controller + Authenticator authn.Authenticator + URLProvider urlprovider.Provider + Authorizer authz.Authorizer +} + +type routeType string + +const ( + Manifests routeType = "manifests" // /v2/:registry/:image/manifests/:reference. + Blobs routeType = "blobs" // /v2/:registry/:image/blobs/:digest. + BlobsUploadsSession routeType = "blob-uploads-session" // /v2/:registry/:image/blobs/uploads/:session_id. + Tags routeType = "tags" // /v2/:registry/:image/tags/list. + Referrers routeType = "referrers" // /v2/:registry/:image/referrers/:digest. + Invalid routeType = "invalid" // Invalid route. + MinSizeOfURLSegments = 5 + + APIPartManifest = "manifests" + APIPartBlobs = "blobs" + APIPartUpload = "uploads" + APIPartTag = "tags" + APIPartReferrer = "referrers" + // Add other route types here. +) + +func getRouteType(url string) routeType { + url = strings.Trim(url, "/") + segments := strings.Split(url, "/") + if len(segments) < MinSizeOfURLSegments { + return Invalid + } + typ := segments[len(segments)-2] + switch typ { + case APIPartManifest: + return Manifests + case APIPartBlobs: + if segments[len(segments)-1] == APIPartUpload { + return BlobsUploadsSession + } + return Blobs + case APIPartUpload: + return BlobsUploadsSession + case APIPartTag: + return Tags + case APIPartReferrer: + return Referrers + } + return Invalid +} + +func GetQueryParamMap(queryParams url.Values) map[string]string { + queryMap := make(map[string]string) + for key, values := range queryParams { + if len(values) > 0 { + queryMap[key] = values[0] + } + } + return queryMap +} + +// ExtractPathVars extracts registry, image, reference, digest and tag from the path +// Path format: /v2/:rootSpace/:registry/:image/manifests/:reference (for ex: +// /v2/myRootSpace/reg1/alpine/blobs/sha256:a258b2a6b59a7aa244d8ceab095c7f8df726f27075a69fca7ad8490f3f63148a). 
+func ExtractPathVars(path string, paramMap map[string]string) (rootIdentifier, registry, image, ref, dgst, tag string) { + path = strings.Trim(path, "/") + segments := strings.Split(path, "/") + if len(segments) < MinSizeOfURLSegments { + // Not enough segments to parse; leave everything empty. + log.Warn().Msgf("Invalid route: %s", path) + return rootIdentifier, registry, image, ref, dgst, tag + } + rootIdentifier = segments[1] + registry = segments[2] + image = strings.Join(segments[3:len(segments)-2], "/") + typ := getRouteType(path) + + switch typ { + case Manifests: + ref = segments[len(segments)-1] + _, err := digest.Parse(ref) + if err != nil { + tag = ref + } else { + dgst = ref + } + case Blobs: + dgst = segments[len(segments)-1] + case BlobsUploadsSession: + if segments[len(segments)-1] != APIPartUpload && segments[len(segments)-2] == APIPartUpload { + image = strings.Join(segments[3:len(segments)-3], "/") + ref = segments[len(segments)-1] + } + if _, ok := paramMap["digest"]; ok { + dgst = paramMap["digest"] + } + case Tags: + // do nothing. + case Referrers: + dgst = segments[len(segments)-1] + case Invalid: + log.Warn().Msgf("Invalid route: %s", path) + default: + log.Warn().Msgf("Unknown route type: %s", typ) + } + + log.Debug().Msgf( + "For path: %s, rootIdentifier: %s, registry: %s, image: %s, ref: %s, dgst: %s, tag: %s", + path, rootIdentifier, registry, image, ref, dgst, tag, + ) + + return rootIdentifier, registry, image, ref, dgst, tag +} + +func handleErrors(ctx context.Context, errors errcode.Errors, w http.ResponseWriter) { + if !commons.IsEmpty(errors) { + _ = errcode.ServeJSON(w, errors) + docker.LogError(errors) + log.Ctx(ctx).Error().Errs("OCI errors", errors).Msgf("Error occurred") + } else if status, ok := ctx.Value("http.response.status").(int); ok && status >= 200 && status <= 399 { + dcontext.GetResponseLogger(ctx, log.Info()).Msg("response completed") + } +} + +func (h *Handler) getRegistryInfo(r *http.Request, remoteSupport bool) (pkg.RegistryInfo, error) { + ctx := r.Context() + queryParams := r.URL.Query() + path := r.URL.Path + paramMap := GetQueryParamMap(queryParams) + rootIdentifier, registryIdentifier, image, ref, dgst, tag := ExtractPathVars(path, paramMap) + if err := metadata.ValidateIdentifier(rootIdentifier); err != nil { + return pkg.RegistryInfo{}, err + } + + rootSpace, err := h.SpaceStore.FindByRef(ctx, rootIdentifier) + if err != nil { + log.Ctx(ctx).Error().Msgf("Root space not found: %s", rootIdentifier) + return pkg.RegistryInfo{}, errcode.ErrCodeRootNotFound + } + + registry, err := h.Controller.RegistryDao.GetByRootParentIDAndName(ctx, rootSpace.ID, registryIdentifier) + if err != nil { + log.Ctx(ctx).Error().Msgf( + "registry %s not found for root: %s. 
Reason: %s", registryIdentifier, rootSpace.Identifier, err, + ) + return pkg.RegistryInfo{}, errcode.ErrCodeRegNotFound + } + _, err = h.SpaceStore.Find(r.Context(), registry.ParentID) + if err != nil { + log.Ctx(ctx).Error().Msgf("Parent space not found: %d", registry.ParentID) + return pkg.RegistryInfo{}, errcode.ErrCodeParentNotFound + } + + info := &pkg.RegistryInfo{ + ArtifactInfo: &pkg.ArtifactInfo{ + BaseInfo: &pkg.BaseInfo{ + RootIdentifier: rootIdentifier, + RootParentID: rootSpace.ID, + ParentID: registry.ParentID, + }, + RegIdentifier: registryIdentifier, + Image: image, + }, + Reference: ref, + Digest: dgst, + Tag: tag, + URLBuilder: v2.NewURLBuilderFromRequest(r, false), + Path: r.URL.Path, + } + + log.Ctx(ctx).Info().Msgf("Dispatch: URI: %s", path) + if commons.IsEmpty(rootSpace.Identifier) { + log.Ctx(ctx).Error().Msgf("ParentRef not found in context") + return pkg.RegistryInfo{}, errcode.ErrCodeParentNotFound + } + + if commons.IsEmpty(registryIdentifier) { + log.Ctx(ctx).Warn().Msgf("registry not found in context") + return pkg.RegistryInfo{}, errcode.ErrCodeRegNotFound + } + + if !commons.IsEmpty(info.Image) && !commons.IsEmpty(info.Tag) { + flag, err2 := MatchArtifactFilter(registry.AllowedPattern, registry.BlockedPattern, info.Image+":"+info.Tag) + if !flag || err2 != nil { + return pkg.RegistryInfo{}, errcode.ErrCodeDenied + } + } + + if registry.Type == artifact.RegistryTypeUPSTREAM && !remoteSupport { + log.Ctx(ctx).Warn().Msgf("Remote registryIdentifier %s not supported", registryIdentifier) + return pkg.RegistryInfo{}, errcode.ErrCodeDenied + } + + return *info, nil +} diff --git a/registry/app/api/handler/oci/delete_blob.go b/registry/app/api/handler/oci/delete_blob.go new file mode 100644 index 000000000..95ab4174a --- /dev/null +++ b/registry/app/api/handler/oci/delete_blob.go @@ -0,0 +1,35 @@ +// Copyright 2023 Harness, Inc. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package oci + +import ( + "net/http" + + "github.com/harness/gitness/registry/app/pkg/commons" +) + +func (h *Handler) DeleteBlob(w http.ResponseWriter, r *http.Request) { + info, err := h.getRegistryInfo(r, false) + if err != nil { + handleErrors(r.Context(), []error{err}, w) + return + } + + headers, errs := h.Controller.DeleteBlob(r.Context(), info) + if commons.IsEmpty(errs) { + headers.WriteToResponse(w) + } + handleErrors(r.Context(), errs, w) +} diff --git a/registry/app/api/handler/oci/delete_blob_upload.go b/registry/app/api/handler/oci/delete_blob_upload.go new file mode 100644 index 000000000..a917e75f9 --- /dev/null +++ b/registry/app/api/handler/oci/delete_blob_upload.go @@ -0,0 +1,35 @@ +// Copyright 2023 Harness, Inc. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. 
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package oci + +import ( + "net/http" + + "github.com/harness/gitness/registry/app/pkg/commons" +) + +func (h *Handler) CancelBlobUpload(w http.ResponseWriter, r *http.Request) { + info, err := h.getRegistryInfo(r, false) + if err != nil { + handleErrors(r.Context(), []error{err}, w) + return + } + + headers, errs := h.Controller.CancelBlobUpload(r.Context(), info, r.FormValue("_state")) + if commons.IsEmpty(errs) { + headers.WriteToResponse(w) + } + handleErrors(r.Context(), errs, w) +} diff --git a/registry/app/api/handler/oci/delete_manifest.go b/registry/app/api/handler/oci/delete_manifest.go new file mode 100644 index 000000000..0274244b1 --- /dev/null +++ b/registry/app/api/handler/oci/delete_manifest.go @@ -0,0 +1,47 @@ +// Copyright 2023 Harness, Inc. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package oci + +import ( + "net/http" + + "github.com/harness/gitness/registry/app/pkg/commons" + + "github.com/rs/zerolog/log" +) + +// DeleteManifest removes a manifest from the registry, if it exists. +func (h *Handler) DeleteManifest(w http.ResponseWriter, r *http.Request) { + ctx := r.Context() + info, err := h.getRegistryInfo(r, false) + if err != nil { + handleErrors(r.Context(), []error{err}, w) + return + } + length := r.ContentLength + if length > 0 { + r.Body = http.MaxBytesReader(w, r.Body, length) + } + errs, headers := h.Controller.DeleteManifest(r.Context(), info) + + if !commons.IsEmpty(errs) { + log.Ctx(ctx).Error().Msgf("DeleteManifest: %v", errs) + } + + if headers != nil { + headers.WriteToResponse(w) + } + handleErrors(ctx, errs, w) +} diff --git a/registry/app/api/handler/oci/get_base.go b/registry/app/api/handler/oci/get_base.go new file mode 100644 index 000000000..29cf7a3b3 --- /dev/null +++ b/registry/app/api/handler/oci/get_base.go @@ -0,0 +1,23 @@ +// Copyright 2023 Harness, Inc. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License.
+ +package oci + +import ( + "net/http" +) + +func (h *Handler) APIBase(w http.ResponseWriter, _ *http.Request) { + w.WriteHeader(http.StatusOK) +} diff --git a/registry/app/api/handler/oci/get_blob.go b/registry/app/api/handler/oci/get_blob.go new file mode 100644 index 000000000..37f734bf8 --- /dev/null +++ b/registry/app/api/handler/oci/get_blob.go @@ -0,0 +1,91 @@ +// Copyright 2023 Harness, Inc. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package oci + +import ( + "errors" + "fmt" + "io" + "net/http" + "time" + + "github.com/harness/gitness/registry/app/pkg" + "github.com/harness/gitness/registry/app/pkg/commons" + "github.com/harness/gitness/registry/app/pkg/docker" + + "github.com/rs/zerolog/log" +) + +func (h *Handler) GetBlob(w http.ResponseWriter, r *http.Request) { + ctx := r.Context() + info, err := h.getRegistryInfo(r, true) + if err != nil { + handleErrors(r.Context(), []error{err}, w) + return + } + result := h.Controller.GetBlob(ctx, info) + + response, ok := result.(*docker.GetBlobResponse) + if !ok { + log.Ctx(ctx).Error().Msg("Failed to cast result to GetBlobResponse") + handleErrors(ctx, []error{errors.New("failed to cast result to GetBlobResponse")}, w) + return + } + defer func() { + if response.Body != nil { + response.Body.Close() + } + if response.ReadCloser != nil { + response.ReadCloser.Close() + } + }() + + if commons.IsEmpty(response.GetErrors()) { + if !commons.IsEmpty(response.RedirectURL) { + http.Redirect(w, r, response.RedirectURL, http.StatusTemporaryRedirect) + return + } + response.ResponseHeaders.WriteHeadersToResponse(w) + if r.Method == http.MethodHead { + return + } + + h.serveContent(w, r, response, info) + response.ResponseHeaders.WriteToResponse(w) + } + + handleErrors(r.Context(), response.GetErrors(), w) +} + +func (h *Handler) serveContent( + w http.ResponseWriter, r *http.Request, response *docker.GetBlobResponse, info pkg.RegistryInfo, +) { + if response.Body != nil { + http.ServeContent(w, r, info.Digest, time.Time{}, response.Body) + } else { + // Use io.CopyN to avoid loading the whole blob into memory when pulling big blobs. + written, err2 := io.CopyN(w, response.ReadCloser, response.Size) + if err2 != nil { + response.Errors = append(response.Errors, fmt.Errorf("error copying blob to response: %w", err2)) + log.Ctx(r.Context()).Error().Err(err2).Msg("error copying blob to response") + } + if written != response.Size { + response.Errors = append( + response.Errors, + fmt.Errorf("size mismatch, actual: %d, expected: %d", written, response.Size), + ) + } + } +} diff --git a/registry/app/api/handler/oci/get_blob_upload.go b/registry/app/api/handler/oci/get_blob_upload.go new file mode 100644 index 000000000..bd9378ac6 --- /dev/null +++ b/registry/app/api/handler/oci/get_blob_upload.go @@ -0,0 +1,37 @@ +// Copyright 2023 Harness, Inc. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package oci + +import ( + "net/http" + + "github.com/harness/gitness/registry/app/pkg/commons" +) + +func (h *Handler) GetUploadBlobStatus(w http.ResponseWriter, r *http.Request) { + info, err := h.getRegistryInfo(r, false) + if err != nil { + handleErrors(r.Context(), []error{err}, w) + return + } + stateToken := r.FormValue("_state") + headers, errs := h.Controller.GetUploadBlobStatus(r.Context(), info, stateToken) + + if commons.IsEmpty(errs) { + headers.WriteToResponse(w) + return + } + handleErrors(r.Context(), errs, w) +} diff --git a/registry/app/api/handler/oci/get_catalog.go b/registry/app/api/handler/oci/get_catalog.go new file mode 100644 index 000000000..739f801f1 --- /dev/null +++ b/registry/app/api/handler/oci/get_catalog.go @@ -0,0 +1,21 @@ +// Copyright 2023 Harness, Inc. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package oci + +import "net/http" + +func (h *Handler) GetCatalog(w http.ResponseWriter, r *http.Request) { + h.Controller.GetCatalog(w, r) +} diff --git a/registry/app/api/handler/oci/get_manifest.go b/registry/app/api/handler/oci/get_manifest.go new file mode 100644 index 000000000..cf65e8c9a --- /dev/null +++ b/registry/app/api/handler/oci/get_manifest.go @@ -0,0 +1,57 @@ +// Copyright 2023 Harness, Inc. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package oci + +import ( + "net/http" + + "github.com/harness/gitness/registry/app/dist_temp/errcode" + "github.com/harness/gitness/registry/app/pkg/commons" + "github.com/harness/gitness/registry/app/pkg/docker" + + "github.com/rs/zerolog/log" +) + +// GetManifest fetches the image manifest from the storage backend, if it exists. 
+func (h *Handler) GetManifest(w http.ResponseWriter, r *http.Request) { + ctx := r.Context() + info, err := h.getRegistryInfo(r, true) + if err != nil { + handleErrors(ctx, errcode.Errors{err}, w) + return + } + + result := h.Controller.PullManifest( + ctx, + info, + r.Header[commons.HeaderAccept], + r.Header[commons.HeaderIfNoneMatch], + ) + if commons.IsEmpty(result.GetErrors()) { + response, ok := result.(*docker.GetManifestResponse) + if !ok { + log.Ctx(ctx).Error().Msg("Failed to cast result to GetManifestResponse") + return + } + response.ResponseHeaders.WriteToResponse(w) + _, bytes, _ := response.Manifest.Payload() + if _, err := w.Write(bytes); err != nil { + log.Ctx(ctx).Error().Err(err).Msg("Failed to write response") + response.ResponseHeaders.Code = http.StatusInternalServerError + } + return + } + handleErrors(ctx, result.GetErrors(), w) +} diff --git a/registry/app/api/handler/oci/get_referrers.go b/registry/app/api/handler/oci/get_referrers.go new file mode 100644 index 000000000..9d26e0dc7 --- /dev/null +++ b/registry/app/api/handler/oci/get_referrers.go @@ -0,0 +1,45 @@ +// Copyright 2023 Harness, Inc. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package oci + +import ( + "encoding/json" + "net/http" + + "github.com/harness/gitness/registry/app/dist_temp/errcode" +) + +func (h *Handler) GetReferrers(w http.ResponseWriter, r *http.Request) { + info, err := h.getRegistryInfo(r, false) + if err != nil { + handleErrors(r.Context(), []error{err}, w) + return + } + defer r.Body.Close() + errorsList := make(errcode.Errors, 0) + + index, responseHeaders, err := h.Controller.GetReferrers(r.Context(), info, r.URL.Query().Get("artifactType")) + if err != nil { + errorsList = append(errorsList, err) + } + if index != nil { + responseHeaders.WriteHeadersToResponse(w) + if err := json.NewEncoder(w).Encode(index); err != nil { + errorsList = append(errorsList, errcode.ErrCodeUnknown.WithDetail(err)) + } + } + + handleErrors(r.Context(), errorsList, w) +} diff --git a/registry/app/api/handler/oci/get_tags.go b/registry/app/api/handler/oci/get_tags.go new file mode 100644 index 000000000..85a8b6d5b --- /dev/null +++ b/registry/app/api/handler/oci/get_tags.go @@ -0,0 +1,68 @@ +// Copyright 2023 Harness, Inc. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+ +package oci + +import ( + "encoding/json" + "net/http" + "strconv" + + "github.com/harness/gitness/registry/app/dist_temp/errcode" + "github.com/harness/gitness/registry/app/pkg/docker" + + "github.com/rs/zerolog/log" +) + +func (h *Handler) GetTags(w http.ResponseWriter, r *http.Request) { + ctx := r.Context() + info, err := h.getRegistryInfo(r, false) + if err != nil { + handleErrors(ctx, []error{err}, w) + return + } + errorsList := make(errcode.Errors, 0) + + q := r.URL.Query() + lastEntry := q.Get("last") + maxEntries, err := strconv.Atoi(q.Get("n")) + if err != nil { + log.Ctx(ctx).Debug().Err(err).Msgf("Failed to parse max entries %s", q.Get("n")) + maxEntries = docker.DefaultMaximumReturnedEntries + } + + if maxEntries <= 0 { + maxEntries = docker.DefaultMaximumReturnedEntries + } + + rs, tags, err := h.Controller.GetTags(ctx, lastEntry, maxEntries, r.URL.String(), info) + log.Ctx(ctx).Debug().Msgf("GetTags: %v %s", rs, tags) + + if err != nil { + log.Ctx(ctx).Error().Err(err).Msg("Failed to list tags") + errorsList = append(errorsList, err) + handleErrors(ctx, errorsList, w) + return + } + rs.WriteHeadersToResponse(w) + enc := json.NewEncoder(w) + if err := enc.Encode( + docker.TagsAPIResponse{ + Name: info.RegIdentifier, + Tags: tags, + }, + ); err != nil { + errorsList = append(errorsList, errcode.ErrCodeUnknown.WithDetail(err)) + } + handleErrors(ctx, errorsList, w) +} diff --git a/registry/app/api/handler/oci/get_token.go b/registry/app/api/handler/oci/get_token.go new file mode 100644 index 000000000..c8bfe7da3 --- /dev/null +++ b/registry/app/api/handler/oci/get_token.go @@ -0,0 +1,212 @@ +// Copyright 2023 Harness, Inc. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License.
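[Reviewer note — illustrative, not part of the patch: GetTags above implements the paginated tag listing of the distribution spec, where `n` caps the page size and `last` is the exclusive start marker. Hypothetical request and response shape:]

// GET /v2/<rootSpace>/<registry>/<image>/tags/list?n=50&last=3.18
//   → {"name": "<registry>", "tags": ["3.19", "edge", ...]}  (values hypothetical)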
+ +package oci + +import ( + "context" + "fmt" + "net/http" + "net/url" + "strings" + + apiauth "github.com/harness/gitness/app/api/auth" + "github.com/harness/gitness/app/api/request" + "github.com/harness/gitness/app/auth" + "github.com/harness/gitness/app/jwt" + "github.com/harness/gitness/app/paths" + "github.com/harness/gitness/app/token" + "github.com/harness/gitness/types" + "github.com/harness/gitness/types/enum" + + "github.com/rs/zerolog/log" +) + +func (h *Handler) GetToken(w http.ResponseWriter, r *http.Request) { + ctx := r.Context() + session, ok := request.AuthSessionFrom(ctx) + if !ok || session.Principal == auth.AnonymousPrincipal { + returnForbiddenResponse(w, fmt.Errorf("no auth session found")) + return + } + + if tokenMetadata, okt := session.Metadata.(*auth.TokenMetadata); okt && + tokenMetadata.TokenType != enum.TokenTypePAT { + returnForbiddenResponse(w, fmt.Errorf("only personal access token allowed")) + return + } + + user, err := h.UserCtrl.FindNoAuth(ctx, session.Principal.UID) + if err != nil { + returnForbiddenResponse(w, err) + return + } + + requestedOciAccess := GetRequestedResourceActions(getScopes(r.URL)) + accessPermissionsList := []jwt.AccessPermissions{} + for _, ra := range requestedOciAccess { + space, err := h.getSpace(ctx, ra.Name) + if err != nil { + // Skip scopes whose space cannot be resolved; do not write a response + // here, as the token response is written after the loop. + log.Ctx(ctx).Warn().Msgf("failed to find space by ref: %v", err) + continue + } + + accessPermissionsList = h.getAccessPermissionList(ctx, space, ra, session, accessPermissionsList) + } + + subClaimsAccessPermissions := &jwt.SubClaimsAccessPermissions{ + Source: jwt.OciSource, + Permissions: accessPermissionsList, + } + + jwtToken, err := h.getTokenDetails(user, subClaimsAccessPermissions) + if err != nil { + returnForbiddenResponse(w, err) + return + } + if jwtToken != "" { + w.WriteHeader(http.StatusOK) + _, err := w.Write([]byte(fmt.Sprintf("{\"token\":\"%s\"}", jwtToken))) + if err != nil { + log.Error().Msgf("failed to write token response: %v", err) + } + return + } +} + +func (h *Handler) getSpace(ctx context.Context, name string) (*types.Space, error) { + spaceRef, _, _ := paths.DisectRoot(name) + space, err := h.SpaceStore.FindByRef(ctx, spaceRef) + return space, err +} + +func (h *Handler) getAccessPermissionList( + ctx context.Context, space *types.Space, ra *ResourceActions, session *auth.Session, + accessPermissionsList []jwt.AccessPermissions, +) []jwt.AccessPermissions { + accessPermissions := &jwt.AccessPermissions{SpaceID: space.ID, Permissions: []enum.Permission{}} + + for _, a := range ra.Actions { + permission, err := getPermissionFromAction(ctx, a) + if err != nil { + log.Ctx(ctx).Warn().Msgf("failed to get permission from action: %v", err) + continue + } + scopeErr := apiauth.CheckSpaceScope( + ctx, + h.Authorizer, + session, + space, + enum.ResourceTypeRegistry, + permission, + ) + if scopeErr != nil { + log.Ctx(ctx).Warn().Msgf("failed to check space scope: %v", scopeErr) + continue + } + accessPermissions.Permissions = append(accessPermissions.Permissions, permission) + } + accessPermissionsList = append(accessPermissionsList, *accessPermissions) + return accessPermissionsList +} + +func getPermissionFromAction(ctx context.Context, action string) (enum.Permission, error) { + switch action { + case "pull": + return enum.PermissionArtifactsDownload, nil + case "push": + return enum.PermissionArtifactsUpload, nil + case "delete": + return enum.PermissionArtifactsDelete, nil + default: + err 
:= fmt.Errorf("unknown action: %s", action) + log.Ctx(ctx).Err(err).Msgf("Failed to get permission from action: %v", err) + return "", err + } +} + +func returnForbiddenResponse(w http.ResponseWriter, err error) { + w.WriteHeader(http.StatusForbidden) + _, err2 := w.Write([]byte(fmt.Sprintf("requested access to the resource is denied: %v", err))) + if err2 != nil { + log.Error().Msgf("failed to write token response: %v", err2) + } +} + +/* + * getTokenDetails attempts to get token details. + */ +func (h *Handler) getTokenDetails( + user *types.User, + accessPermissions *jwt.SubClaimsAccessPermissions, +) (string, error) { + return token.CreateUserWithAccessPermissions(user, accessPermissions) +} + +// GetRequestedResourceActions ... +func GetRequestedResourceActions(scopes []string) []*ResourceActions { + var res []*ResourceActions + for _, s := range scopes { + if s == "" { + continue + } + items := strings.Split(s, ":") + length := len(items) + + var resourceType string + var resourceName string + actions := make([]string, 0) + + switch length { + case 1: + resourceType = items[0] + case 2: + resourceType = items[0] + resourceName = items[1] + default: + resourceType = items[0] + resourceName = strings.Join(items[1:length-1], ":") + if len(items[length-1]) > 0 { + actions = strings.Split(items[length-1], ",") + } + } + + res = append( + res, &ResourceActions{ + Type: resourceType, + Name: resourceName, + Actions: actions, + }, + ) + } + return res +} + +func getScopes(u *url.URL) []string { + var sector string + var result []string + for _, sector = range u.Query()["scope"] { + result = append(result, strings.Split(sector, " ")...) + } + return result +} + +// ResourceActions stores allowed actions on a resource. +type ResourceActions struct { + Type string `json:"type"` + Name string `json:"name"` + Actions []string `json:"actions"` +} diff --git a/registry/app/api/handler/oci/head_blob.go b/registry/app/api/handler/oci/head_blob.go new file mode 100644 index 000000000..57237b37f --- /dev/null +++ b/registry/app/api/handler/oci/head_blob.go @@ -0,0 +1,48 @@ +// Copyright 2023 Harness, Inc. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
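For context, the token handler above is what a Docker-style client calls before any registry operation: it authenticates with a personal access token and passes one or more scope query parameters in the repository:<name>:<actions> form parsed by GetRequestedResourceActions. A minimal client sketch, assuming a hypothetical host and space/registry path:

package main

import (
	"encoding/json"
	"fmt"
	"net/http"
	"net/url"
)

func main() {
	// Hypothetical host and registry path; the endpoint shape follows the
	// handler above: GET /v2/token?scope=repository:<name>:<actions>.
	q := url.Values{}
	q.Add("scope", "repository:myspace/myregistry/alpine:pull,push")

	req, err := http.NewRequest(http.MethodGet,
		"https://gitness.example.com/v2/token?"+q.Encode(), nil)
	if err != nil {
		panic(err)
	}
	// The handler only accepts personal access token (PAT) sessions.
	req.SetBasicAuth("my-user", "my-pat")

	resp, err := http.DefaultClient.Do(req)
	if err != nil {
		panic(err)
	}
	defer resp.Body.Close()

	// The handler writes {"token":"<jwt>"} on success.
	var body struct {
		Token string `json:"token"`
	}
	if err := json.NewDecoder(resp.Body).Decode(&body); err != nil {
		panic(err)
	}
	fmt.Println("bearer token:", body.Token)
}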
+ +package oci + +import ( + "net/http" + + "github.com/harness/gitness/registry/app/pkg/commons" +) + +func (h *Handler) HeadBlob(w http.ResponseWriter, r *http.Request) { + info, err := h.getRegistryInfo(r, false) + if err != nil { + handleErrors(r.Context(), []error{err}, w) + return + } + headers, body, _, readCloser, redirectURL, errs := h.Controller.HeadBlob(r.Context(), info) + defer func() { + if body != nil { + body.Close() + } + if readCloser != nil { + readCloser.Close() + } + }() + + if commons.IsEmpty(errs) { + if redirectURL != "" { + http.Redirect(w, r, redirectURL, http.StatusTemporaryRedirect) + return + } + + headers.WriteHeadersToResponse(w) + } + handleErrors(r.Context(), errs, w) +} diff --git a/registry/app/api/handler/oci/head_manifest.go b/registry/app/api/handler/oci/head_manifest.go new file mode 100644 index 000000000..0c771babd --- /dev/null +++ b/registry/app/api/handler/oci/head_manifest.go @@ -0,0 +1,43 @@ +// Copyright 2023 Harness, Inc. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package oci + +import ( + "net/http" + + "github.com/harness/gitness/registry/app/dist_temp/errcode" + "github.com/harness/gitness/registry/app/pkg/commons" + "github.com/harness/gitness/registry/app/pkg/docker" +) + +// HeadManifest fetches the image manifest from the storage backend, if it exists. +func (h *Handler) HeadManifest(w http.ResponseWriter, r *http.Request) { + info, err := h.getRegistryInfo(r, true) + if err != nil { + handleErrors(r.Context(), errcode.Errors{err}, w) + return + } + + result := h.Controller.HeadManifest( + r.Context(), + info, + r.Header[commons.HeaderAccept], + r.Header[commons.HeaderIfNoneMatch], + ) + if commons.IsEmpty(result.GetErrors()) { + result.(*docker.GetManifestResponse).ResponseHeaders.WriteToResponse(w) + } + handleErrors(r.Context(), result.GetErrors(), w) +} diff --git a/registry/app/api/handler/oci/patch_blob_upload.go b/registry/app/api/handler/oci/patch_blob_upload.go new file mode 100644 index 000000000..4f42c6dad --- /dev/null +++ b/registry/app/api/handler/oci/patch_blob_upload.go @@ -0,0 +1,43 @@ +// Copyright 2023 Harness, Inc. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
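The HEAD handlers above mirror the OCI distribution spec: a client probes for a manifest with a HEAD request, and the registry answers 200 (typically with a Docker-Content-Digest header) or 404, in both cases without a body. A sketch of that probe, with a hypothetical host and image name and the bearer token from the token endpoint:

package main

import (
	"fmt"
	"net/http"
)

func main() {
	// Hypothetical registry host and image reference.
	endpoint := "https://gitness.example.com/v2/myspace/myregistry/alpine/manifests/latest"

	req, err := http.NewRequest(http.MethodHead, endpoint, nil)
	if err != nil {
		panic(err)
	}
	req.Header.Set("Accept", "application/vnd.oci.image.manifest.v1+json")
	req.Header.Set("Authorization", "Bearer <token>") // obtained from /v2/token

	resp, err := http.DefaultClient.Do(req)
	if err != nil {
		panic(err)
	}
	defer resp.Body.Close()

	switch resp.StatusCode {
	case http.StatusOK:
		fmt.Println("manifest exists, digest:", resp.Header.Get("Docker-Content-Digest"))
	case http.StatusNotFound:
		fmt.Println("manifest not found")
	default:
		fmt.Println("unexpected status:", resp.Status)
	}
}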
+ +package oci + +import ( + "net/http" + + "github.com/harness/gitness/registry/app/pkg/commons" +) + +func (h *Handler) PatchBlobUpload(w http.ResponseWriter, r *http.Request) { + info, err := h.getRegistryInfo(r, false) + if err != nil { + handleErrors(r.Context(), []error{err}, w) + return + } + ct := r.Header.Get(commons.HeaderContentType) + cr := r.Header.Get(commons.HeaderContentRange) + cl := r.Header.Get(commons.HeaderContentLength) + length := r.ContentLength + if length > 0 { + r.Body = http.MaxBytesReader(w, r.Body, length) + } + stateToken := r.FormValue("_state") + headers, errs := h.Controller.PatchBlobUpload(r.Context(), info, ct, cr, cl, length, stateToken, r.Body) + + if commons.IsEmpty(errs) { + headers.WriteToResponse(w) + } + handleErrors(r.Context(), errs, w) +} diff --git a/registry/app/api/handler/oci/post_blob_upload.go b/registry/app/api/handler/oci/post_blob_upload.go new file mode 100644 index 000000000..074437d9c --- /dev/null +++ b/registry/app/api/handler/oci/post_blob_upload.go @@ -0,0 +1,42 @@ +// Copyright 2023 Harness, Inc. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package oci + +import ( + "net/http" + "strings" + + "github.com/harness/gitness/registry/app/pkg/commons" +) + +func (h *Handler) InitiateUploadBlob(w http.ResponseWriter, r *http.Request) { + info, err := h.getRegistryInfo(r, false) + if err != nil { + handleErrors(r.Context(), []error{err}, w) + return + } + fromParam := r.FormValue("from") + fromParamParts := strings.Split(fromParam, "/") + fromRepo := "" + if len(fromParamParts) > 1 { + fromRepo = fromParamParts[1] + } + mountDigest := r.FormValue("mount") + headers, errs := h.Controller.InitiateUploadBlob(r.Context(), info, fromRepo, mountDigest) + if commons.IsEmpty(errs) { + headers.WriteToResponse(w) + } + handleErrors(r.Context(), errs, w) +} diff --git a/registry/app/api/handler/oci/put_blob_upload.go b/registry/app/api/handler/oci/put_blob_upload.go new file mode 100644 index 000000000..e4ecc2c9f --- /dev/null +++ b/registry/app/api/handler/oci/put_blob_upload.go @@ -0,0 +1,41 @@ +// Copyright 2023 Harness, Inc. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
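Taken together, InitiateUploadBlob and PatchBlobUpload above implement the push half of the spec's two-phase blob upload: POST /v2/<name>/blobs/uploads/ opens a session (optionally mounting an existing blob via the mount and from parameters), then PATCH streams bytes against the Location URL the registry returns, whose query string carries the _state token the handler reads. A compressed sketch under hypothetical names, auth headers omitted:

package main

import (
	"bytes"
	"fmt"
	"net/http"
)

func main() {
	// Hypothetical registry path.
	base := "https://gitness.example.com/v2/myspace/myregistry/alpine"
	blob := []byte("example blob contents")

	// Step 1: open an upload session. A 202 response returns a Location URL
	// carrying the _state token (a relative Location would need resolving
	// against the host before reuse).
	resp, err := http.Post(base+"/blobs/uploads/", "application/octet-stream", nil)
	if err != nil {
		panic(err)
	}
	location := resp.Header.Get("Location")
	resp.Body.Close()
	if resp.StatusCode != http.StatusAccepted {
		panic(fmt.Sprintf("initiate upload: unexpected status %s", resp.Status))
	}

	// Step 2: stream the data as a single chunk. Content-Range is inclusive,
	// matching the cr header parsed by PatchBlobUpload above.
	req, err := http.NewRequest(http.MethodPatch, location, bytes.NewReader(blob))
	if err != nil {
		panic(err)
	}
	req.Header.Set("Content-Type", "application/octet-stream")
	req.Header.Set("Content-Range", fmt.Sprintf("0-%d", len(blob)-1))

	resp, err = http.DefaultClient.Do(req)
	if err != nil {
		panic(err)
	}
	defer resp.Body.Close()
	fmt.Println("chunk accepted; next Location:", resp.Header.Get("Location"))
}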
+ +package oci + +import ( + "net/http" + + "github.com/harness/gitness/registry/app/pkg/commons" +) + +func (h *Handler) CompleteBlobUpload(w http.ResponseWriter, r *http.Request) { + info, err := h.getRegistryInfo(r, false) + if err != nil { + handleErrors(r.Context(), []error{err}, w) + return + } + stateToken := r.FormValue("_state") + length := r.ContentLength + if length > 0 { + r.Body = http.MaxBytesReader(w, r.Body, length) + } + + headers, errs := h.Controller.CompleteBlobUpload(r.Context(), info, r.Body, r.ContentLength, stateToken) + + if commons.IsEmpty(errs) { + headers.WriteToResponse(w) + } + handleErrors(r.Context(), errs, w) +} diff --git a/registry/app/api/handler/oci/put_manifest.go b/registry/app/api/handler/oci/put_manifest.go new file mode 100644 index 000000000..3f93e3eca --- /dev/null +++ b/registry/app/api/handler/oci/put_manifest.go @@ -0,0 +1,50 @@ +// Copyright 2023 Harness, Inc. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package oci + +import ( + "net/http" + + "github.com/harness/gitness/registry/app/pkg/commons" + + "github.com/rs/zerolog/log" +) + +const ( + maxManifestBodySize = 4 * 1024 * 1024 +) + +// PutManifest validates and stores a manifest in the registry. +func (h *Handler) PutManifest(w http.ResponseWriter, r *http.Request) { + ctx := r.Context() + info, err := h.getRegistryInfo(r, false) + if err != nil { + handleErrors(r.Context(), []error{err}, w) + return + } + mediaType := r.Header.Get("Content-Type") + length := r.ContentLength + r.Body = http.MaxBytesReader(w, r.Body, maxManifestBodySize) + + headers, errs := h.Controller.PutManifest(r.Context(), info, mediaType, r.Body, length) + if !commons.IsEmpty(errs) { + log.Ctx(ctx).Error().Errs("Failed to Put manifest", errs).Msg("Failed to Put manifest") + } + + if commons.IsEmpty(errs) { + headers.WriteToResponse(w) + } + handleErrors(ctx, errs, w) +} diff --git a/registry/app/api/handler/swagger/swagger.go b/registry/app/api/handler/swagger/swagger.go new file mode 100644 index 000000000..f677b8b54 --- /dev/null +++ b/registry/app/api/handler/swagger/swagger.go @@ -0,0 +1,63 @@ +// Copyright 2023 Harness, Inc. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
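CompleteBlobUpload and PutManifest close out a push: a PUT with the expected digest as a query parameter finalizes the blob, then a second PUT uploads the manifest with its media type in Content-Type, capped at maxManifestBodySize (4 MiB) by the handler above. A sketch continuing the hypothetical names from the previous steps; the upload id and the skeletal manifest body are stand-ins:

package main

import (
	"bytes"
	"crypto/sha256"
	"fmt"
	"net/http"
)

func main() {
	blob := []byte("example blob contents")
	digest := fmt.Sprintf("sha256:%x", sha256.Sum256(blob))

	// Step 3: finalize the blob. In practice this URL is the Location header
	// from the last PATCH response (it already carries the _state token);
	// the upload id below is a placeholder.
	finalize := "https://gitness.example.com/v2/myspace/myregistry/alpine/blobs/uploads/upload-id-123?digest=" + digest
	req, err := http.NewRequest(http.MethodPut, finalize, nil)
	if err != nil {
		panic(err)
	}
	resp, err := http.DefaultClient.Do(req)
	if err != nil {
		panic(err)
	}
	resp.Body.Close() // a 201 Created indicates the blob is committed

	// Step 4: upload the manifest. The body here is skeletal, for
	// illustration only; a real manifest references the blob by digest.
	manifest := []byte(`{"schemaVersion":2,"mediaType":"application/vnd.oci.image.manifest.v1+json"}`)
	req, err = http.NewRequest(http.MethodPut,
		"https://gitness.example.com/v2/myspace/myregistry/alpine/manifests/latest",
		bytes.NewReader(manifest))
	if err != nil {
		panic(err)
	}
	req.Header.Set("Content-Type", "application/vnd.oci.image.manifest.v1+json")
	resp, err = http.DefaultClient.Do(req)
	if err != nil {
		panic(err)
	}
	defer resp.Body.Close()
	fmt.Println("put manifest:", resp.Status)
}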
+ +package swagger + +import ( + "encoding/json" + "fmt" + "net/http" + + "github.com/harness/gitness/registry/app/api/openapi/contracts/artifact" + + "github.com/go-chi/chi/v5" + "github.com/rs/zerolog/log" + httpswagger "github.com/swaggo/http-swagger" +) + +type Handler interface { + http.Handler +} + +func GetSwaggerHandler(base string) Handler { + r := chi.NewRouter() + // Generate OpenAPI specification + swagger, err := artifact.GetSwagger() + if err != nil { + panic(err) + } + + // Serve the OpenAPI specification JSON + r.Get( + fmt.Sprintf("%s/swagger.json", base), http.HandlerFunc( + func(w http.ResponseWriter, _ *http.Request) { + w.Header().Set("Content-Type", "application/json") + w.WriteHeader(http.StatusOK) + jsonResponse, _ := json.Marshal(swagger) + _, err2 := w.Write(jsonResponse) + if err2 != nil { + log.Error().Err(err2).Msg("Failed to write response") + } + }, + ), + ) + + r.Get( + fmt.Sprintf("%s/swagger/*", base), httpswagger.Handler( + httpswagger.URL(fmt.Sprintf("%s/swagger.json", base)), // The url pointing to API definition + ), + ) + + return r +} diff --git a/registry/app/api/middleware/auth.go b/registry/app/api/middleware/auth.go new file mode 100644 index 000000000..34a05e58a --- /dev/null +++ b/registry/app/api/middleware/auth.go @@ -0,0 +1,121 @@ +// Copyright 2023 Harness, Inc. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package middleware + +import ( + "context" + "fmt" + "net/http" + "strings" + + "github.com/harness/gitness/app/api/render" + "github.com/harness/gitness/app/api/request" + "github.com/harness/gitness/app/auth" + "github.com/harness/gitness/app/jwt" + "github.com/harness/gitness/registry/app/api/handler/oci" + registryauth "github.com/harness/gitness/registry/app/auth" + + "github.com/rs/zerolog/log" +) + +func OciCheckAuth(url string) func(http.Handler) http.Handler { + return func(next http.Handler) http.Handler { + return http.HandlerFunc( + func(w http.ResponseWriter, r *http.Request) { + ctx := r.Context() + session, _ := request.AuthSessionFrom(ctx) + if session.Principal == auth.AnonymousPrincipal { + scope := getScope(r) + returnUnauthorised(ctx, w, url, scope) + return + } + next.ServeHTTP(w, r) + }, + ) + } +} + +// BlockNonOciSourceToken blocks any request that doesn't have AccessPermissionMetadata. +func BlockNonOciSourceToken(url string) func(http.Handler) http.Handler { + return func(next http.Handler) http.Handler { + return http.HandlerFunc( + func(w http.ResponseWriter, r *http.Request) { + ctx := r.Context() + if session, oks := request.AuthSessionFrom(ctx); oks { + if metadata, okt := session.Metadata.(*auth.AccessPermissionMetadata); !okt || + metadata.AccessPermissions.Source != jwt.OciSource { + log.Ctx(ctx).Warn(). 
+ Msg("blocking request - non OCI source tokens are not allowed for usage with oci endpoints") + + scope := getScope(r) + returnUnauthorised(ctx, w, url, scope) + return + } + } + next.ServeHTTP(w, r) + }, + ) + } +} + +func CheckAuth() func(http.Handler) http.Handler { + return func(next http.Handler) http.Handler { + return http.HandlerFunc( + func(w http.ResponseWriter, r *http.Request) { + ctx := r.Context() + session, _ := request.AuthSessionFrom(ctx) + if session.Principal == auth.AnonymousPrincipal { + render.Unauthorized(ctx, w) + return + } + next.ServeHTTP(w, r) + }, + ) + } +} + +func getRefsFromName(name string) (spaceRef, repoRef string) { + name = strings.Trim(name, "/") + refs := strings.Split(name, "/") + spaceRef = refs[0] + repoRef = refs[1] + return +} + +func getScope(r *http.Request) string { + var scope string + path := r.URL.Path + if path != "/v2/" && path != "/v2/token" { + paramMap := oci.GetQueryParamMap(r.URL.Query()) + rootIdentifier, registryIdentifier, _, _, _, _ := oci.ExtractPathVars(path, paramMap) + var access []registryauth.Access + access = registryauth.AppendAccess(access, r.Method, rootIdentifier, registryIdentifier) + if fromRepo := r.FormValue("from"); fromRepo != "" { + space, repoName := getRefsFromName(fromRepo) + access = registryauth.AppendAccess(access, http.MethodGet, space, repoName) + } + scope = registryauth.NewAccessSet(access...).ScopeParam() + } + return scope +} + +func returnUnauthorised(ctx context.Context, w http.ResponseWriter, url string, scope string) { + header := fmt.Sprintf(`Bearer realm="%s", service="gitness-registry"`, url) + if scope != "" { + header = fmt.Sprintf(`%s, scope="%s"`, header, scope) + } + w.Header().Set("WWW-Authenticate", header) + render.Unauthorized(ctx, w) +} diff --git a/registry/app/api/openapi/api.yaml b/registry/app/api/openapi/api.yaml new file mode 100644 index 000000000..04a730755 --- /dev/null +++ b/registry/app/api/openapi/api.yaml @@ -0,0 +1,1607 @@ +openapi: "3.0.0" +info: + title: Harness Artifact Registry API + version: 1.0.0 + contact: + name: Harness Artifact Registry - developers + url: https://www.harness.io + +tags: + - name: Registries + description: APIs to create, update, list registries + - name: Artifacts + description: APIs to get, list artifacts + - name: Docker Artifacts + description: APIs to get details of docker artifacts + - name: Helm Artifacts + description: APIs to get details of helm artifacts + + +servers: + - url: /api/v1 + description: Registry + +paths: + /spaces/{space_ref}/registries: + get: + summary: List Registries + description: Lists all the Registries. + operationId: GetAllRegistries + tags: + - Spaces + parameters: + - $ref: "#/components/parameters/spaceRefPathParam" + - $ref: "#/components/parameters/packageTypeParam" + - $ref: "#/components/parameters/RegistryTypeParam" + - $ref: "#/components/parameters/pageNumber" + - $ref: "#/components/parameters/pageSize" + - $ref: "#/components/parameters/sortOrder" + - $ref: "#/components/parameters/sortField" + - $ref: "#/components/parameters/searchTerm" + responses: + 200: + $ref: "#/components/responses/ListRegistryResponse" + 400: + $ref: "#/components/responses/BadRequest" + 401: + $ref: "#/components/responses/Unauthenticated" + 403: + $ref: "#/components/responses/Unauthorized" + 404: + $ref: "#/components/responses/NotFound" + 500: + $ref: "#/components/responses/InternalServerError" + /registry: + post: + summary: Create Registry. + description: Create a Registry. 
+ operationId: CreateRegistry + tags: + - Registries + requestBody: + $ref: "#/components/requestBodies/RegistryRequest" + responses: + 201: + $ref: "#/components/responses/RegistryResponse" + 400: + $ref: "#/components/responses/BadRequest" + 401: + $ref: "#/components/responses/Unauthenticated" + 403: + $ref: "#/components/responses/Unauthorized" + 500: + $ref: "#/components/responses/InternalServerError" + + /registry/{registry_ref}: + get: + summary: Returns Registry Details + description: Returns Registry Details in the account for the given key + operationId: GetRegistry + tags: + - Registries + parameters: + - $ref: "#/components/parameters/registryRefPathParam" + responses: + 200: + $ref: "#/components/responses/RegistryResponse" + 400: + $ref: "#/components/responses/BadRequest" + 401: + $ref: "#/components/responses/Unauthenticated" + 403: + $ref: "#/components/responses/Unauthorized" + 404: + $ref: "#/components/responses/NotFound" + 500: + $ref: "#/components/responses/InternalServerError" + put: + summary: Updates a Registry + description: Updates a Registry in the account for the given key + operationId: ModifyRegistry + tags: + - Registries + parameters: + - $ref: "#/components/parameters/registryRefPathParam" + requestBody: + $ref: "#/components/requestBodies/RegistryRequest" + responses: + 200: + $ref: "#/components/responses/RegistryResponse" + 400: + $ref: "#/components/responses/BadRequest" + 401: + $ref: "#/components/responses/Unauthenticated" + 403: + $ref: "#/components/responses/Unauthorized" + 404: + $ref: "#/components/responses/NotFound" + 500: + $ref: "#/components/responses/InternalServerError" + delete: + summary: Delete a Registry + description: Delete a Registry in the account for the given key + operationId: DeleteRegistry + tags: + - Registries + parameters: + - $ref: "#/components/parameters/registryRefPathParam" + responses: + 200: + $ref: "#/components/responses/Success" + 400: + $ref: "#/components/responses/BadRequest" + 401: + $ref: "#/components/responses/Unauthenticated" + 403: + $ref: "#/components/responses/Unauthorized" + 404: + $ref: "#/components/responses/NotFound" + 500: + $ref: "#/components/responses/InternalServerError" + /registry/{registry_ref}/client-setup-details: + get: + summary: Returns CLI Client Setup Details + description: Returns CLI Client Setup Details based on package type + operationId: GetClientSetupDetails + tags: + - Registries + parameters: + - $ref: "#/components/parameters/registryRefPathParam" + - $ref: "#/components/parameters/artifactParam" + - $ref: "#/components/parameters/versionParam" + responses: + 200: + $ref: "#/components/responses/ClientSetupDetailsResponse" + 400: + $ref: "#/components/responses/BadRequest" + 401: + $ref: "#/components/responses/Unauthenticated" + 403: + $ref: "#/components/responses/Unauthorized" + 404: + $ref: "#/components/responses/NotFound" + 500: + $ref: "#/components/responses/InternalServerError" + /spaces/{space_ref}/artifacts: + get: + summary: List Artifacts + description: Lists all the Artifacts. 
+ operationId: GetAllArtifacts + tags: + - Spaces + parameters: + - $ref: "#/components/parameters/spaceRefPathParam" + - $ref: "#/components/parameters/LabelsParam" + - $ref: "#/components/parameters/packageTypeParam" + - $ref: "#/components/parameters/RegistryIdentifierParam" + - $ref: "#/components/parameters/pageNumber" + - $ref: "#/components/parameters/pageSize" + - $ref: "#/components/parameters/sortOrder" + - $ref: "#/components/parameters/sortField" + - $ref: "#/components/parameters/searchTerm" + responses: + 200: + $ref: "#/components/responses/ListArtifactResponse" + 400: + $ref: "#/components/responses/BadRequest" + 401: + $ref: "#/components/responses/Unauthenticated" + 403: + $ref: "#/components/responses/Unauthorized" + 404: + $ref: "#/components/responses/NotFound" + 500: + $ref: "#/components/responses/InternalServerError" + /spaces/{space_ref}/artifact/stats: + get: + summary: Get Artifact Stats + description: Get Artifact Stats. + operationId: GetArtifactStatsForSpace + tags: + - Artifacts + parameters: + - $ref: "#/components/parameters/spaceRefPathParam" + - $ref: "#/components/parameters/fromDateParam" + - $ref: "#/components/parameters/toDateParam" + responses: + 200: + $ref: "#/components/responses/ArtifactStatsResponse" + 400: + $ref: "#/components/responses/BadRequest" + 401: + $ref: "#/components/responses/Unauthenticated" + 403: + $ref: "#/components/responses/Unauthorized" + 404: + $ref: "#/components/responses/NotFound" + 500: + $ref: "#/components/responses/InternalServerError" + /registry/{registry_ref}/artifact/labels: + get: + summary: List Artifact Labels + description: List Artifact Labels. + operationId: ListArtifactLabels + tags: + - Artifacts + parameters: + - $ref: "#/components/parameters/registryRefPathParam" + - $ref: "#/components/parameters/pageNumber" + - $ref: "#/components/parameters/pageSize" + - $ref: "#/components/parameters/searchTerm" + responses: + 200: + $ref: "#/components/responses/ListArtifactLabelResponse" + 400: + $ref: "#/components/responses/BadRequest" + 401: + $ref: "#/components/responses/Unauthenticated" + 403: + $ref: "#/components/responses/Unauthorized" + 404: + $ref: "#/components/responses/NotFound" + 500: + $ref: "#/components/responses/InternalServerError" + /registry/{registry_ref}/artifact/stats: + get: + summary: Get Artifact Stats + description: Get Artifact Stats. + operationId: GetArtifactStatsForRegistry + tags: + - Artifacts + parameters: + - $ref: "#/components/parameters/registryRefPathParam" + - $ref: "#/components/parameters/fromDateParam" + - $ref: "#/components/parameters/toDateParam" + responses: + 200: + $ref: "#/components/responses/ArtifactStatsResponse" + 400: + $ref: "#/components/responses/BadRequest" + 401: + $ref: "#/components/responses/Unauthenticated" + 403: + $ref: "#/components/responses/Unauthorized" + 404: + $ref: "#/components/responses/NotFound" + 500: + $ref: "#/components/responses/InternalServerError" + /registry/{registry_ref}/artifact/{artifact}/summary: + get: + summary: Get Artifact Summary + description: Get Artifact Summary. 
+ operationId: GetArtifactSummary + tags: + - Artifacts + parameters: + - $ref: "#/components/parameters/registryRefPathParam" + - $ref: "#/components/parameters/artifactPathParam" + responses: + 200: + $ref: "#/components/responses/ArtifactSummaryResponse" + 400: + $ref: "#/components/responses/BadRequest" + 401: + $ref: "#/components/responses/Unauthenticated" + 403: + $ref: "#/components/responses/Unauthorized" + 404: + $ref: "#/components/responses/NotFound" + 500: + $ref: "#/components/responses/InternalServerError" + /registry/{registry_ref}/artifact/{artifact}/versions: + get: + summary: List Artifact Versions + description: Lists all the Artifact Versions. + operationId: GetAllArtifactVersions + tags: + - Artifacts + parameters: + - $ref: "#/components/parameters/registryRefPathParam" + - $ref: "#/components/parameters/artifactPathParam" + - $ref: "#/components/parameters/pageNumber" + - $ref: "#/components/parameters/pageSize" + - $ref: "#/components/parameters/sortOrder" + - $ref: "#/components/parameters/sortField" + - $ref: "#/components/parameters/searchTerm" + responses: + 200: + $ref: "#/components/responses/ListArtifactVersionResponse" + 400: + $ref: "#/components/responses/BadRequest" + 401: + $ref: "#/components/responses/Unauthenticated" + 403: + $ref: "#/components/responses/Unauthorized" + 404: + $ref: "#/components/responses/NotFound" + 500: + $ref: "#/components/responses/InternalServerError" + /registry/{registry_ref}/artifact/{artifact}/labels: + put: + summary: Update Artifact Labels + description: Update Artifact Labels. + operationId: UpdateArtifactLabels + tags: + - Artifacts + parameters: + - $ref: "#/components/parameters/registryRefPathParam" + - $ref: "#/components/parameters/artifactPathParam" + requestBody: + $ref: "#/components/requestBodies/ArtifactLabelRequest" + responses: + 200: + $ref: "#/components/responses/ArtifactLabelResponse" + 400: + $ref: "#/components/responses/BadRequest" + 401: + $ref: "#/components/responses/Unauthenticated" + 403: + $ref: "#/components/responses/Unauthorized" + 404: + $ref: "#/components/responses/NotFound" + 500: + $ref: "#/components/responses/InternalServerError" + /registry/{registry_ref}/artifact/{artifact}/stats: + get: + summary: Get Artifact Stats + description: Get Artifact Stats. + operationId: GetArtifactStats + tags: + - Artifacts + parameters: + - $ref: "#/components/parameters/registryRefPathParam" + - $ref: "#/components/parameters/artifactPathParam" + - $ref: "#/components/parameters/fromDateParam" + - $ref: "#/components/parameters/toDateParam" + responses: + 200: + $ref: "#/components/responses/ArtifactStatsResponse" + 400: + $ref: "#/components/responses/BadRequest" + 401: + $ref: "#/components/responses/Unauthenticated" + 403: + $ref: "#/components/responses/Unauthorized" + 404: + $ref: "#/components/responses/NotFound" + 500: + $ref: "#/components/responses/InternalServerError" + /registry/{registry_ref}/artifact/{artifact}/version/{version}/summary: + get: + summary: Get Artifact Version Summary + description: Get Artifact Version Summary. 
+ operationId: GetArtifactVersionSummary + tags: + - Artifacts + parameters: + - $ref: "#/components/parameters/registryRefPathParam" + - $ref: "#/components/parameters/artifactPathParam" + - $ref: "#/components/parameters/versionPathParam" + responses: + 200: + $ref: "#/components/responses/ArtifactVersionSummaryResponse" + 400: + $ref: "#/components/responses/BadRequest" + 401: + $ref: "#/components/responses/Unauthenticated" + 403: + $ref: "#/components/responses/Unauthorized" + 404: + $ref: "#/components/responses/NotFound" + 500: + $ref: "#/components/responses/InternalServerError" + /registry/{registry_ref}/artifact/{artifact}/version/{version}/docker/details: + get: + summary: Describe Docker Artifact Detail + description: Get Docker Artifact Details + operationId: GetDockerArtifactDetails + tags: + - Docker Artifacts + parameters: + - $ref: "#/components/parameters/registryRefPathParam" + - $ref: "#/components/parameters/artifactPathParam" + - $ref: "#/components/parameters/versionPathParam" + - $ref: "#/components/parameters/digestParam" + responses: + 200: + $ref: "#/components/responses/DockerArtifactDetailResponse" + 400: + $ref: "#/components/responses/BadRequest" + 401: + $ref: "#/components/responses/Unauthenticated" + 403: + $ref: "#/components/responses/Unauthorized" + 404: + $ref: "#/components/responses/NotFound" + 500: + $ref: "#/components/responses/InternalServerError" + /registry/{registry_ref}/artifact/{artifact}/version/{version}/docker/manifest: + get: + summary: Describe Docker Artifact Manifest + description: Get Docker Artifact Manifest + operationId: GetDockerArtifactManifest + tags: + - Docker Artifacts + parameters: + - $ref: "#/components/parameters/registryRefPathParam" + - $ref: "#/components/parameters/artifactPathParam" + - $ref: "#/components/parameters/versionPathParam" + - $ref: "#/components/parameters/digestParam" + responses: + 200: + $ref: "#/components/responses/DockerArtifactManifestResponse" + 400: + $ref: "#/components/responses/BadRequest" + 401: + $ref: "#/components/responses/Unauthenticated" + 403: + $ref: "#/components/responses/Unauthorized" + 404: + $ref: "#/components/responses/NotFound" + 500: + $ref: "#/components/responses/InternalServerError" + /registry/{registry_ref}/artifact/{artifact}/version/{version}/docker/manifests: + get: + summary: Describe Docker Artifact Manifests + description: Get Docker Artifact Manifests + operationId: GetDockerArtifactManifests + tags: + - Docker Artifacts + parameters: + - $ref: "#/components/parameters/registryRefPathParam" + - $ref: "#/components/parameters/artifactPathParam" + - $ref: "#/components/parameters/versionPathParam" + responses: + 200: + $ref: "#/components/responses/DockerManifestsResponse" + 400: + $ref: "#/components/responses/BadRequest" + 401: + $ref: "#/components/responses/Unauthenticated" + 403: + $ref: "#/components/responses/Unauthorized" + 404: + $ref: "#/components/responses/NotFound" + 500: + $ref: "#/components/responses/InternalServerError" + /registry/{registry_ref}/artifact/{artifact}/version/{version}/docker/layers: + get: + summary: Describe Docker Artifact Layers + description: Get Docker Artifact Layers + operationId: GetDockerArtifactLayers + tags: + - Docker Artifacts + parameters: + - $ref: "#/components/parameters/registryRefPathParam" + - $ref: "#/components/parameters/artifactPathParam" + - $ref: "#/components/parameters/versionPathParam" + - $ref: "#/components/parameters/digestParam" + responses: + 200: + $ref: 
"#/components/responses/DockerLayersResponse" + 400: + $ref: "#/components/responses/BadRequest" + 401: + $ref: "#/components/responses/Unauthenticated" + 403: + $ref: "#/components/responses/Unauthorized" + 404: + $ref: "#/components/responses/NotFound" + 500: + $ref: "#/components/responses/InternalServerError" + /registry/{registry_ref}/artifact/{artifact}/version/{version}/helm/details: + get: + summary: Describe Helm Artifact Detail + description: Get Helm Artifact Details + operationId: GetHelmArtifactDetails + tags: + - Helm Artifacts + parameters: + - $ref: "#/components/parameters/registryRefPathParam" + - $ref: "#/components/parameters/artifactPathParam" + - $ref: "#/components/parameters/versionPathParam" + responses: + 200: + $ref: "#/components/responses/HelmArtifactDetailResponse" + 400: + $ref: "#/components/responses/BadRequest" + 401: + $ref: "#/components/responses/Unauthenticated" + 403: + $ref: "#/components/responses/Unauthorized" + 404: + $ref: "#/components/responses/NotFound" + 500: + $ref: "#/components/responses/InternalServerError" + /registry/{registry_ref}/artifact/{artifact}/version/{version}/helm/manifest: + get: + summary: Describe Helm Artifact Manifest + description: Get Helm Artifact Manifest + operationId: GetHelmArtifactManifest + tags: + - Helm Artifacts + parameters: + - $ref: "#/components/parameters/registryRefPathParam" + - $ref: "#/components/parameters/artifactPathParam" + - $ref: "#/components/parameters/versionPathParam" + responses: + 200: + $ref: "#/components/responses/HelmArtifactManifestResponse" + 400: + $ref: "#/components/responses/BadRequest" + 401: + $ref: "#/components/responses/Unauthenticated" + 403: + $ref: "#/components/responses/Unauthorized" + 404: + $ref: "#/components/responses/NotFound" + 500: + $ref: "#/components/responses/InternalServerError" +components: + requestBodies: + RegistryRequest: + description: request for create and update registry + content: + application/json: + schema: + $ref: "#/components/schemas/RegistryRequest" + ArtifactLabelRequest: + description: request to update artifact labels + content: + application/json: + schema: + $ref: "#/components/schemas/ArtifactLabelRequest" + responses: + ArtifactStatsResponse: + description: response to get artifact stats response + content: + application/json: + schema: + type: object + properties: + status: + $ref: "#/components/schemas/Status" + data: + $ref: "#/components/schemas/ArtifactStats" + required: + - status + - data + ArtifactLabelResponse: + description: response to get artifact label response + content: + application/json: + schema: + type: object + properties: + status: + $ref: "#/components/schemas/Status" + data: + $ref: "#/components/schemas/ArtifactSummary" + required: + - status + - data + ClientSetupDetailsResponse: + description: response for client setup details + content: + application/json: + schema: + type: object + properties: + status: + $ref: "#/components/schemas/Status" + data: + $ref: "#/components/schemas/ClientSetupDetails" + required: + - status + - data + RegistryResponse: + description: response for create, get and update registry + content: + application/json: + schema: + type: object + properties: + status: + $ref: "#/components/schemas/Status" + data: + $ref: "#/components/schemas/Registry" + required: + - status + - data + DockerArtifactDetailResponse: + description: response to get docker artifact detail + content: + application/json: + schema: + type: object + properties: + status: + $ref: "#/components/schemas/Status" + 
data: + $ref: "#/components/schemas/DockerArtifactDetail" + required: + - status + - data + HelmArtifactDetailResponse: + description: response to get helm artifact detail + content: + application/json: + schema: + type: object + properties: + status: + $ref: "#/components/schemas/Status" + data: + $ref: "#/components/schemas/HelmArtifactDetail" + required: + - status + - data + ArtifactSummaryResponse: + description: response to get artifact summary + content: + application/json: + schema: + type: object + properties: + status: + $ref: "#/components/schemas/Status" + data: + $ref: "#/components/schemas/ArtifactSummary" + required: + - status + - data + ArtifactVersionSummaryResponse: + description: response to get docker artifact version summary + content: + application/json: + schema: + type: object + properties: + status: + $ref: "#/components/schemas/Status" + data: + $ref: "#/components/schemas/ArtifactVersionSummary" + required: + - status + - data + DockerArtifactManifestResponse: + description: response to get docker artifact manifest + content: + application/json: + schema: + type: object + properties: + status: + $ref: "#/components/schemas/Status" + data: + $ref: "#/components/schemas/DockerArtifactManifest" + required: + - status + - data + HelmArtifactManifestResponse: + description: response to get helm artifact manifest + content: + application/json: + schema: + type: object + properties: + status: + $ref: "#/components/schemas/Status" + data: + $ref: "#/components/schemas/HelmArtifactManifest" + required: + - status + - data + DockerManifestsResponse: + description: response to get artifact layers + content: + application/json: + schema: + type: object + properties: + status: + $ref: "#/components/schemas/Status" + data: + $ref: "#/components/schemas/DockerManifests" + required: + - status + - data + ListArtifactLabelResponse: + description: response for list artifact labels + content: + application/json: + schema: + type: object + properties: + status: + $ref: "#/components/schemas/Status" + data: + $ref: "#/components/schemas/ListArtifactLabel" + required: + - status + - data + DockerLayersResponse: + description: response to get artifact layers + content: + application/json: + schema: + type: object + properties: + status: + $ref: "#/components/schemas/Status" + data: + $ref: "#/components/schemas/DockerLayersSummary" + required: + - status + - data + ListRegistryResponse: + description: response for list registry + content: + application/json: + schema: + type: object + properties: + status: + $ref: "#/components/schemas/Status" + data: + $ref: "#/components/schemas/ListRegistry" + required: + - status + - data + ListArtifactResponse: + description: response for list artifact + content: + application/json: + schema: + type: object + properties: + status: + $ref: "#/components/schemas/Status" + data: + $ref: "#/components/schemas/ListArtifact" + required: + - status + - data + ListArtifactVersionResponse: + description: response for list versions of artifact + content: + application/json: + schema: + type: object + properties: + status: + $ref: "#/components/schemas/Status" + data: + $ref: "#/components/schemas/ListArtifactVersion" + required: + - status + - data + Unauthenticated: + description: Unauthenticated + content: + application/json: + schema: + $ref: "#/components/schemas/Error" + Success: + description: Success Response + content: + application/json: + schema: + type: object + properties: + status: + $ref: "#/components/schemas/Status" + required: + - status + 
+    Unauthorized:
+      description: Unauthorized
+      content:
+        application/json:
+          schema:
+            $ref: "#/components/schemas/Error"
+    NotFound:
+      description: The specified resource was not found
+      content:
+        application/json:
+          schema:
+            $ref: "#/components/schemas/Error"
+    InternalServerError:
+      description: Internal server error
+      content:
+        application/json:
+          schema:
+            $ref: "#/components/schemas/Error"
+    BadRequest:
+      description: Bad request
+      content:
+        application/json:
+          schema:
+            $ref: '#/components/schemas/Error'
+  schemas:
+    ArtifactStats:
+      type: object
+      description: Harness Artifact Stats
+      properties:
+        downloadCount:
+          type: integer
+          format: int64
+        uploadSize:
+          type: integer
+          format: int64
+        downloadSize:
+          type: integer
+          format: int64
+        totalStorageSize:
+          type: integer
+          format: int64
+    ListRegistry:
+      type: object
+      description: A list of Harness Artifact Registries
+      properties:
+        pageCount:
+          type: integer
+          format: int64
+          description: The total number of pages
+          example: 100
+        itemCount:
+          type: integer
+          format: int64
+          description: The total number of items
+          example: 1
+        pageSize:
+          type: integer
+          description: The number of items per page
+          example: 1
+        pageIndex:
+          type: integer
+          format: int64
+          description: The current page
+          example: 0
+        registries:
+          type: array
+          description: A list of Harness Artifact Registries
+          items:
+            $ref: "#/components/schemas/RegistryMetadata"
+      required:
+        - registries
+    ListArtifact:
+      type: object
+      description: A list of Artifacts
+      properties:
+        pageCount:
+          type: integer
+          format: int64
+          description: The total number of pages
+          example: 100
+        itemCount:
+          type: integer
+          format: int64
+          description: The total number of items
+          example: 1
+        pageSize:
+          type: integer
+          description: The number of items per page
+          example: 1
+        pageIndex:
+          type: integer
+          format: int64
+          description: The current page
+          example: 0
+        artifacts:
+          type: array
+          description: A list of Artifacts
+          items:
+            $ref: "#/components/schemas/ArtifactMetadata"
+      required:
+        - artifacts
+    ListArtifactVersion:
+      type: object
+      description: A list of Artifact versions
+      properties:
+        pageCount:
+          type: integer
+          format: int64
+          description: The total number of pages
+          example: 100
+        itemCount:
+          type: integer
+          format: int64
+          description: The total number of items
+          example: 1
+        pageSize:
+          type: integer
+          description: The number of items per page
+          example: 1
+        pageIndex:
+          type: integer
+          description: The current page
+          format: int64
+          example: 0
+        artifactVersions:
+          type: array
+          description: A list of Artifact versions
+          items:
+            $ref: "#/components/schemas/ArtifactVersionMetadata"
+      required:
+        - artifactVersions
+    ListArtifactLabel:
+      type: object
+      description: A list of Harness Artifact Labels
+      properties:
+        pageCount:
+          type: integer
+          description: The total number of pages
+          format: int64
+          example: 100
+        itemCount:
+          type: integer
+          format: int64
+          description: The total number of items
+          example: 1
+        pageSize:
+          type: integer
+          description: The number of items per page
+          example: 1
+        pageIndex:
+          type: integer
+          format: int64
+          description: The current page
+          example: 0
+        labels:
+          type: array
+          items:
+            type: string
+      required:
+        - labels
+    RegistryMetadata:
+      type: object
+      description: Harness Artifact Registry Metadata
+      properties:
+        type:
+          $ref: "#/components/schemas/RegistryType"
+        packageType:
+          $ref: "#/components/schemas/PackageType"
+        description:
+          type: string
+        url:
+          type: string
+        identifier:
+          type: string
+        registrySize:
+          type: string
+        downloadsCount:
+          type: integer
+          format: int64
+        artifactsCount:
+          type: integer
+          format: int64
+        labels:
+          type: array
+          items:
+            type: string
+        lastModified:
+          type: string
+        path:
+          type: string
+      required:
+        - type
+        - packageType
+        - identifier
+        - url
+    ClientSetupDetails:
+      type: object
+      description: Client Setup Details
+      properties:
+        mainHeader:
+          type: string
+        secHeader:
+          type: string
+        sections:
+          type: array
+          items:
+            $ref: "#/components/schemas/ClientSetupSection"
+      required:
+        - mainHeader
+        - secHeader
+        - sections
+    ClientSetupSection:
+      type: object
+      description: Client Setup Section
+      properties:
+        header:
+          type: string
+        steps:
+          type: array
+          items:
+            $ref: "#/components/schemas/ClientSetupStep"
+    ClientSetupStep:
+      type: object
+      description: Client Setup Step
+      properties:
+        header:
+          type: string
+        commands:
+          type: array
+          items:
+            type: string
+        type:
+          $ref: "#/components/schemas/ClientSetupStepType"
+    ArtifactMetadata:
+      type: object
+      description: Artifact Metadata
+      properties:
+        name:
+          type: string
+        registryIdentifier:
+          type: string
+        registryPath:
+          type: string
+        labels:
+          type: array
+          items:
+            type: string
+        downloadsCount:
+          type: integer
+          format: int64
+        latestVersion:
+          type: string
+        lastModified:
+          type: string
+        packageType:
+          $ref: "#/components/schemas/PackageType"
+      required:
+        - name
+        - registryIdentifier
+        - latestVersion
+        - registryPath
+    ArtifactVersionMetadata:
+      type: object
+      description: Artifact Version Metadata
+      properties:
+        name:
+          type: string
+        size:
+          type: string
+        registryIdentifier:
+          type: string
+        registryPath:
+          type: string
+        digestCount:
+          type: integer
+          format: int64
+        pullCommand:
+          type: string
+        downloadsCount:
+          type: integer
+          format: int64
+        isLatestVersion:
+          type: boolean
+        lastModified:
+          type: string
+        packageType:
+          $ref: "#/components/schemas/PackageType"
+      required:
+        - name
+        - registryIdentifier
+        - registryPath
+    Registry:
+      type: object
+      description: Harness Artifact Registry
+      properties:
+        cleanupPolicy:
+          type: array
+          items:
+            $ref: "#/components/schemas/CleanupPolicy"
+        identifier:
+          type: string
+        packageType:
+          $ref: "#/components/schemas/PackageType"
+        description:
+          type: string
+        url:
+          type: string
+        allowedPattern:
+          type: array
+          items:
+            type: string
+        blockedPattern:
+          type: array
+          items:
+            type: string
+        labels:
+          type: array
+          items:
+            type: string
+        config:
+          $ref: '#/components/schemas/RegistryConfig'
+        createdAt:
+          type: string
+        modifiedAt:
+          type: string
+      required:
+        - identifier
+        - url
+        - packageType
+    DockerArtifactDetail:
+      type: object
+      description: Docker Artifact Detail
+      properties:
+        imageName:
+          type: string
+        version:
+          type: string
+        packageType:
+          $ref: "#/components/schemas/PackageType"
+        registryPath:
+          type: string
+        url:
+          type: string
+        size:
+          type: string
+        downloadsCount:
+          type: integer
+          format: int64
+        pullCommand:
+          type: string
+        createdAt:
+          type: string
+        modifiedAt:
+          type: string
+        isLatestVersion:
+          type: boolean
+      required:
+        - imageName
+        - version
+        - registryPath
+        - url
+        - packageType
+    HelmArtifactDetail:
+      type: object
+      description: Helm Artifact Detail
+      properties:
+        artifact:
+          type: string
+        version:
+          type: string
+        packageType:
+          $ref: "#/components/schemas/PackageType"
+        registryPath:
+          type: string
+        url:
+          type: string
+        size:
+          type: string
+        downloadsCount:
+          type: integer
+          format: int64
+        pullCommand:
+          type: string
+        createdAt:
+          type: string
+        modifiedAt:
+          type: string
+        isLatestVersion:
+          type: boolean
+      required:
+        - artifact
+        - version
+        - registryPath
+        - url
+        - packageType
+    ArtifactSummary:
+      type: object
+      description: Harness Artifact Summary
+      properties:
+        imageName:
+          type: string
+        packageType:
+          $ref: "#/components/schemas/PackageType"
+        labels:
+          type: array
+          items:
+            type: string
+        downloadsCount:
+          type: integer
+          format: int64
+        createdAt:
+          type: string
+        modifiedAt:
+          type: string
+      required:
+        - imageName
+        - packageType
+    ArtifactVersionSummary:
+      type: object
+      description: Artifact Version Summary
+      properties:
+        imageName:
+          type: string
+        version:
+          type: string
+        packageType:
+          $ref: "#/components/schemas/PackageType"
+        isLatestVersion:
+          type: boolean
+      required:
+        - imageName
+        - version
+        - packageType
+    DockerArtifactManifest:
+      type: object
+      description: Docker Artifact Manifest
+      properties:
+        manifest:
+          type: string
+      required:
+        - manifest
+    HelmArtifactManifest:
+      type: object
+      description: Helm Artifact Manifest
+      properties:
+        manifest:
+          type: string
+      required:
+        - manifest
+    DockerLayerEntry:
+      type: object
+      description: Docker Layer Entry
+      properties:
+        command:
+          type: string
+        size:
+          type: string
+      required:
+        - command
+    DockerManifestDetails:
+      type: object
+      description: Docker Manifest Details
+      properties:
+        osArch:
+          type: string
+        digest:
+          type: string
+        size:
+          type: string
+        createdAt:
+          type: string
+      required:
+        - digest
+        - osArch
+    DockerManifests:
+      type: object
+      description: Harness Manifests
+      properties:
+        imageName:
+          type: string
+        version:
+          type: string
+        isLatestVersion:
+          type: boolean
+        manifests:
+          type: array
+          items:
+            $ref: '#/components/schemas/DockerManifestDetails'
+      required:
+        - imageName
+        - version
+    DockerLayersSummary:
+      type: object
+      description: Harness Layers Summary
+      properties:
+        digest:
+          type: string
+        osArch:
+          type: string
+        layers:
+          type: array
+          items:
+            $ref: '#/components/schemas/DockerLayerEntry'
+      required:
+        - digest
+    RegistryConfig:
+      type: object
+      description: SubConfig specific for Virtual or Upstream Registry
+      required:
+        - type
+      properties:
+        type:
+          $ref: "#/components/schemas/RegistryType"
+      discriminator:
+        propertyName: type
+        mapping:
+          VIRTUAL: "#/components/schemas/VirtualConfig"
+          UPSTREAM: "#/components/schemas/UpstreamConfig"
+      oneOf:
+        - $ref: "#/components/schemas/VirtualConfig"
+        - $ref: "#/components/schemas/UpstreamConfig"
+    VirtualConfig:
+      type: object
+      description: Configuration for Harness Virtual Artifact Registries
+      properties:
+        upstreamProxies:
+          type: array
+          items:
+            type: string
+    UpstreamConfig:
+      type: object
+      description: Configuration for Harness Artifact UpstreamProxies
+      properties:
+        authType:
+          $ref: "#/components/schemas/AuthType"
+        auth:
+          oneOf:
+            - $ref: "#/components/schemas/UserPassword"
+            - $ref: "#/components/schemas/Anonymous"
+        url:
+          type: string
+        source:
+          type: string
+          enum:
+            - Dockerhub
+            - Custom
+      x-discriminator-value: UPSTREAM
+      required:
+        - authType
+    CleanupPolicy:
+      type: object
+      description: Cleanup Policy for Harness Artifact Registries
+      properties:
+        name:
+          type: string
+        expireDays:
+          type: integer
+        versionPrefix:
+          type: array
+          items:
+            type: string
+        packagePrefix:
+          type: array
+          items:
+            type: string
+    RegistryType:
+      type: string
+      description: refers to the type of registry, i.e. virtual or upstream
+      enum:
+        - VIRTUAL
+        - UPSTREAM
+      discriminator:
+        propertyName: type
+    PackageType:
+      type: string
+      description: refers to the package type
+      enum:
+        - DOCKER
+        - MAVEN
+        - GENERIC
+        - HELM
+    Status:
+      type: string
+      description: "Indicates if the request was successful or not"
+      enum:
+        - SUCCESS
+        - FAILURE
+        - ERROR
+    AuthType:
+      type: string
+      description: "Authentication type"
+      enum:
+        - UserPassword
+        - Anonymous
+    ClientSetupStepType:
+      type: string
+      description: "Client setup step type"
+      enum:
+        - Static
+        - GenerateToken
+    Error:
+      type: object
+      properties:
+        code:
+          type: string
+          description: The HTTP error code
+          example: "404"
+        message:
+          type: string
+          description: The reason the request failed
+        details:
+          type: object
+          description: Additional details about the error
+      required:
+        - code
+        - message
+    RegistryRequest:
+      type: object
+      properties:
+        identifier:
+          type: string
+        packageType:
+          $ref: "#/components/schemas/PackageType"
+        description:
+          type: string
+        allowedPattern:
+          type: array
+          items:
+            type: string
+        blockedPattern:
+          type: array
+          items:
+            type: string
+        cleanupPolicy:
+          type: array
+          items:
+            $ref: "#/components/schemas/CleanupPolicy"
+        labels:
+          type: array
+          items:
+            type: string
+        config:
+          $ref: '#/components/schemas/RegistryConfig'
+        parentRef:
+          type: string
+      required:
+        - identifier
+        - packageType
+    ArtifactLabelRequest:
+      type: object
+      properties:
+        labels:
+          type: array
+          items:
+            type: string
+      required:
+        - labels
+    UserPassword:
+      type: object
+      properties:
+        userName:
+          type: string
+        secretIdentifier:
+          type: string
+        secretSpaceId:
+          type: integer
+      required:
+        - userName
+        - secretIdentifier
+    Anonymous: {}
+  parameters:
+    packageTypeParam:
+      name: package_type
+      in: query
+      required: false
+      description: Registry Package Type
+      schema:
+        type: array
+        items:
+          type: string
+    RegistryTypeParam:
+      name: type
+      in: query
+      required: false
+      description: Registry Type
+      schema:
+        type: string
+        enum:
+          - VIRTUAL
+          - UPSTREAM
+    RegistryIdentifierParam:
+      name: reg_identifier
+      in: query
+      required: false
+      description: Registry Identifier
+      schema:
+        type: string
+    spaceRefPathParam:
+      name: space_ref
+      in: path
+      required: true
+      description: Unique space path.
+      schema:
+        type: string
+    LabelsParam:
+      name: label
+      in: query
+      required: false
+      description: Label.
+      schema:
+        type: array
+        items:
+          type: string
+    registryRefPathParam:
+      name: registry_ref
+      in: path
+      required: true
+      description: Unique registry path.
+      schema:
+        type: string
+    artifactParam:
+      name: artifact
+      in: query
+      required: false
+      description: Artifact
+      schema:
+        type: string
+    versionParam:
+      name: version
+      in: query
+      required: false
+      description: Version
+      schema:
+        type: string
+    artifactPathParam:
+      name: artifact
+      in: path
+      required: true
+      description: Name of artifact.
+      schema:
+        type: string
+    versionPathParam:
+      name: version
+      in: path
+      required: true
+      description: Name of Artifact Version.
+      schema:
+        type: string
+    digestParam:
+      name: digest
+      in: query
+      required: true
+      description: Digest.
+      schema:
+        type: string
+    searchTerm:
+      name: search_term
+      in: query
+      required: false
+      description: Search term.
+ schema: + type: string + pageNumber: + name: page + in: query + required: false + description: Current page number + schema: + type: integer + format: int64 + default: 1 + pageSize: + name: size + in: query + required: false + description: Number of items per page + schema: + type: integer + format: int64 + default: 20 + sortOrder: + name: sort_order + in: query + required: false + description: sortOrder + schema: + type: string + sortField: + name: sort_field + in: query + required: false + description: sortField + schema: + type: string + fromDateParam: + name: from + in: query + required: false + description: Date. Format - MM/DD/YYYY + schema: + type: string + toDateParam: + name: to + in: query + required: false + description: Date. Format - MM/DD/YYYY + schema: + type: string \ No newline at end of file diff --git a/registry/app/api/openapi/contracts/artifact/services.gen.go b/registry/app/api/openapi/contracts/artifact/services.gen.go new file mode 100644 index 000000000..d2526adc6 --- /dev/null +++ b/registry/app/api/openapi/contracts/artifact/services.gen.go @@ -0,0 +1,3803 @@ +// Package artifact provides primitives to interact with the openapi HTTP API. +// +// Code generated by github.com/deepmap/oapi-codegen/v2 version v2.1.0 DO NOT EDIT. +package artifact + +import ( + "bytes" + "compress/gzip" + "context" + "encoding/base64" + "encoding/json" + "fmt" + "net/http" + "net/url" + "path" + "strings" + + "github.com/getkin/kin-openapi/openapi3" + "github.com/go-chi/chi/v5" + "github.com/oapi-codegen/runtime" + strictnethttp "github.com/oapi-codegen/runtime/strictmiddleware/nethttp" +) + +// ServerInterface represents all server handlers. +type ServerInterface interface { + // Create Registry. + // (POST /registry) + CreateRegistry(w http.ResponseWriter, r *http.Request) + // Delete a Registry + // (DELETE /registry/{registry_ref}) + DeleteRegistry(w http.ResponseWriter, r *http.Request, registryRef RegistryRefPathParam) + // Returns Registry Details + // (GET /registry/{registry_ref}) + GetRegistry(w http.ResponseWriter, r *http.Request, registryRef RegistryRefPathParam) + // Updates a Registry + // (PUT /registry/{registry_ref}) + ModifyRegistry(w http.ResponseWriter, r *http.Request, registryRef RegistryRefPathParam) + // List Artifact Labels + // (GET /registry/{registry_ref}/artifact/labels) + ListArtifactLabels(w http.ResponseWriter, r *http.Request, registryRef RegistryRefPathParam, params ListArtifactLabelsParams) + // Get Artifact Stats + // (GET /registry/{registry_ref}/artifact/stats) + GetArtifactStatsForRegistry(w http.ResponseWriter, r *http.Request, registryRef RegistryRefPathParam, params GetArtifactStatsForRegistryParams) + // Update Artifact Labels + // (PUT /registry/{registry_ref}/artifact/{artifact}/labels) + UpdateArtifactLabels(w http.ResponseWriter, r *http.Request, registryRef RegistryRefPathParam, artifact ArtifactPathParam) + // Get Artifact Stats + // (GET /registry/{registry_ref}/artifact/{artifact}/stats) + GetArtifactStats(w http.ResponseWriter, r *http.Request, registryRef RegistryRefPathParam, artifact ArtifactPathParam, params GetArtifactStatsParams) + // Get Artifact Summary + // (GET /registry/{registry_ref}/artifact/{artifact}/summary) + GetArtifactSummary(w http.ResponseWriter, r *http.Request, registryRef RegistryRefPathParam, artifact ArtifactPathParam) + // Describe Docker Artifact Detail + // (GET /registry/{registry_ref}/artifact/{artifact}/version/{version}/docker/details) + GetDockerArtifactDetails(w http.ResponseWriter, r 
*http.Request, registryRef RegistryRefPathParam, artifact ArtifactPathParam, version VersionPathParam, params GetDockerArtifactDetailsParams) + // Describe Docker Artifact Layers + // (GET /registry/{registry_ref}/artifact/{artifact}/version/{version}/docker/layers) + GetDockerArtifactLayers(w http.ResponseWriter, r *http.Request, registryRef RegistryRefPathParam, artifact ArtifactPathParam, version VersionPathParam, params GetDockerArtifactLayersParams) + // Describe Docker Artifact Manifest + // (GET /registry/{registry_ref}/artifact/{artifact}/version/{version}/docker/manifest) + GetDockerArtifactManifest(w http.ResponseWriter, r *http.Request, registryRef RegistryRefPathParam, artifact ArtifactPathParam, version VersionPathParam, params GetDockerArtifactManifestParams) + // Describe Docker Artifact Manifests + // (GET /registry/{registry_ref}/artifact/{artifact}/version/{version}/docker/manifests) + GetDockerArtifactManifests(w http.ResponseWriter, r *http.Request, registryRef RegistryRefPathParam, artifact ArtifactPathParam, version VersionPathParam) + // Describe Helm Artifact Detail + // (GET /registry/{registry_ref}/artifact/{artifact}/version/{version}/helm/details) + GetHelmArtifactDetails(w http.ResponseWriter, r *http.Request, registryRef RegistryRefPathParam, artifact ArtifactPathParam, version VersionPathParam) + // Describe Helm Artifact Manifest + // (GET /registry/{registry_ref}/artifact/{artifact}/version/{version}/helm/manifest) + GetHelmArtifactManifest(w http.ResponseWriter, r *http.Request, registryRef RegistryRefPathParam, artifact ArtifactPathParam, version VersionPathParam) + // Get Artifact Version Summary + // (GET /registry/{registry_ref}/artifact/{artifact}/version/{version}/summary) + GetArtifactVersionSummary(w http.ResponseWriter, r *http.Request, registryRef RegistryRefPathParam, artifact ArtifactPathParam, version VersionPathParam) + // List Artifact Versions + // (GET /registry/{registry_ref}/artifact/{artifact}/versions) + GetAllArtifactVersions(w http.ResponseWriter, r *http.Request, registryRef RegistryRefPathParam, artifact ArtifactPathParam, params GetAllArtifactVersionsParams) + // Returns CLI Client Setup Details + // (GET /registry/{registry_ref}/client-setup-details) + GetClientSetupDetails(w http.ResponseWriter, r *http.Request, registryRef RegistryRefPathParam, params GetClientSetupDetailsParams) + // Get Artifact Stats + // (GET /spaces/{space_ref}/artifact/stats) + GetArtifactStatsForSpace(w http.ResponseWriter, r *http.Request, spaceRef SpaceRefPathParam, params GetArtifactStatsForSpaceParams) + // List Artifacts + // (GET /spaces/{space_ref}/artifacts) + GetAllArtifacts(w http.ResponseWriter, r *http.Request, spaceRef SpaceRefPathParam, params GetAllArtifactsParams) + // List Registries + // (GET /spaces/{space_ref}/registries) + GetAllRegistries(w http.ResponseWriter, r *http.Request, spaceRef SpaceRefPathParam, params GetAllRegistriesParams) +} + +// Unimplemented server implementation that returns http.StatusNotImplemented for each endpoint. + +type Unimplemented struct{} + +// Create Registry. 
+// (POST /registry) +func (_ Unimplemented) CreateRegistry(w http.ResponseWriter, r *http.Request) { + w.WriteHeader(http.StatusNotImplemented) +} + +// Delete a Registry +// (DELETE /registry/{registry_ref}) +func (_ Unimplemented) DeleteRegistry(w http.ResponseWriter, r *http.Request, registryRef RegistryRefPathParam) { + w.WriteHeader(http.StatusNotImplemented) +} + +// Returns Registry Details +// (GET /registry/{registry_ref}) +func (_ Unimplemented) GetRegistry(w http.ResponseWriter, r *http.Request, registryRef RegistryRefPathParam) { + w.WriteHeader(http.StatusNotImplemented) +} + +// Updates a Registry +// (PUT /registry/{registry_ref}) +func (_ Unimplemented) ModifyRegistry(w http.ResponseWriter, r *http.Request, registryRef RegistryRefPathParam) { + w.WriteHeader(http.StatusNotImplemented) +} + +// List Artifact Labels +// (GET /registry/{registry_ref}/artifact/labels) +func (_ Unimplemented) ListArtifactLabels(w http.ResponseWriter, r *http.Request, registryRef RegistryRefPathParam, params ListArtifactLabelsParams) { + w.WriteHeader(http.StatusNotImplemented) +} + +// Get Artifact Stats +// (GET /registry/{registry_ref}/artifact/stats) +func (_ Unimplemented) GetArtifactStatsForRegistry(w http.ResponseWriter, r *http.Request, registryRef RegistryRefPathParam, params GetArtifactStatsForRegistryParams) { + w.WriteHeader(http.StatusNotImplemented) +} + +// Update Artifact Labels +// (PUT /registry/{registry_ref}/artifact/{artifact}/labels) +func (_ Unimplemented) UpdateArtifactLabels(w http.ResponseWriter, r *http.Request, registryRef RegistryRefPathParam, artifact ArtifactPathParam) { + w.WriteHeader(http.StatusNotImplemented) +} + +// Get Artifact Stats +// (GET /registry/{registry_ref}/artifact/{artifact}/stats) +func (_ Unimplemented) GetArtifactStats(w http.ResponseWriter, r *http.Request, registryRef RegistryRefPathParam, artifact ArtifactPathParam, params GetArtifactStatsParams) { + w.WriteHeader(http.StatusNotImplemented) +} + +// Get Artifact Summary +// (GET /registry/{registry_ref}/artifact/{artifact}/summary) +func (_ Unimplemented) GetArtifactSummary(w http.ResponseWriter, r *http.Request, registryRef RegistryRefPathParam, artifact ArtifactPathParam) { + w.WriteHeader(http.StatusNotImplemented) +} + +// Describe Docker Artifact Detail +// (GET /registry/{registry_ref}/artifact/{artifact}/version/{version}/docker/details) +func (_ Unimplemented) GetDockerArtifactDetails(w http.ResponseWriter, r *http.Request, registryRef RegistryRefPathParam, artifact ArtifactPathParam, version VersionPathParam, params GetDockerArtifactDetailsParams) { + w.WriteHeader(http.StatusNotImplemented) +} + +// Describe Docker Artifact Layers +// (GET /registry/{registry_ref}/artifact/{artifact}/version/{version}/docker/layers) +func (_ Unimplemented) GetDockerArtifactLayers(w http.ResponseWriter, r *http.Request, registryRef RegistryRefPathParam, artifact ArtifactPathParam, version VersionPathParam, params GetDockerArtifactLayersParams) { + w.WriteHeader(http.StatusNotImplemented) +} + +// Describe Docker Artifact Manifest +// (GET /registry/{registry_ref}/artifact/{artifact}/version/{version}/docker/manifest) +func (_ Unimplemented) GetDockerArtifactManifest(w http.ResponseWriter, r *http.Request, registryRef RegistryRefPathParam, artifact ArtifactPathParam, version VersionPathParam, params GetDockerArtifactManifestParams) { + w.WriteHeader(http.StatusNotImplemented) +} + +// Describe Docker Artifact Manifests +// (GET 
/registry/{registry_ref}/artifact/{artifact}/version/{version}/docker/manifests) +func (_ Unimplemented) GetDockerArtifactManifests(w http.ResponseWriter, r *http.Request, registryRef RegistryRefPathParam, artifact ArtifactPathParam, version VersionPathParam) { + w.WriteHeader(http.StatusNotImplemented) +} + +// Describe Helm Artifact Detail +// (GET /registry/{registry_ref}/artifact/{artifact}/version/{version}/helm/details) +func (_ Unimplemented) GetHelmArtifactDetails(w http.ResponseWriter, r *http.Request, registryRef RegistryRefPathParam, artifact ArtifactPathParam, version VersionPathParam) { + w.WriteHeader(http.StatusNotImplemented) +} + +// Describe Helm Artifact Manifest +// (GET /registry/{registry_ref}/artifact/{artifact}/version/{version}/helm/manifest) +func (_ Unimplemented) GetHelmArtifactManifest(w http.ResponseWriter, r *http.Request, registryRef RegistryRefPathParam, artifact ArtifactPathParam, version VersionPathParam) { + w.WriteHeader(http.StatusNotImplemented) +} + +// Get Artifact Version Summary +// (GET /registry/{registry_ref}/artifact/{artifact}/version/{version}/summary) +func (_ Unimplemented) GetArtifactVersionSummary(w http.ResponseWriter, r *http.Request, registryRef RegistryRefPathParam, artifact ArtifactPathParam, version VersionPathParam) { + w.WriteHeader(http.StatusNotImplemented) +} + +// List Artifact Versions +// (GET /registry/{registry_ref}/artifact/{artifact}/versions) +func (_ Unimplemented) GetAllArtifactVersions(w http.ResponseWriter, r *http.Request, registryRef RegistryRefPathParam, artifact ArtifactPathParam, params GetAllArtifactVersionsParams) { + w.WriteHeader(http.StatusNotImplemented) +} + +// Returns CLI Client Setup Details +// (GET /registry/{registry_ref}/client-setup-details) +func (_ Unimplemented) GetClientSetupDetails(w http.ResponseWriter, r *http.Request, registryRef RegistryRefPathParam, params GetClientSetupDetailsParams) { + w.WriteHeader(http.StatusNotImplemented) +} + +// Get Artifact Stats +// (GET /spaces/{space_ref}/artifact/stats) +func (_ Unimplemented) GetArtifactStatsForSpace(w http.ResponseWriter, r *http.Request, spaceRef SpaceRefPathParam, params GetArtifactStatsForSpaceParams) { + w.WriteHeader(http.StatusNotImplemented) +} + +// List Artifacts +// (GET /spaces/{space_ref}/artifacts) +func (_ Unimplemented) GetAllArtifacts(w http.ResponseWriter, r *http.Request, spaceRef SpaceRefPathParam, params GetAllArtifactsParams) { + w.WriteHeader(http.StatusNotImplemented) +} + +// List Registries +// (GET /spaces/{space_ref}/registries) +func (_ Unimplemented) GetAllRegistries(w http.ResponseWriter, r *http.Request, spaceRef SpaceRefPathParam, params GetAllRegistriesParams) { + w.WriteHeader(http.StatusNotImplemented) +} + +// ServerInterfaceWrapper converts contexts to parameters. 
+type ServerInterfaceWrapper struct { + Handler ServerInterface + HandlerMiddlewares []MiddlewareFunc + ErrorHandlerFunc func(w http.ResponseWriter, r *http.Request, err error) +} + +type MiddlewareFunc func(http.Handler) http.Handler + +// CreateRegistry operation middleware +func (siw *ServerInterfaceWrapper) CreateRegistry(w http.ResponseWriter, r *http.Request) { + ctx := r.Context() + + handler := http.Handler(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { + siw.Handler.CreateRegistry(w, r) + })) + + for _, middleware := range siw.HandlerMiddlewares { + handler = middleware(handler) + } + + handler.ServeHTTP(w, r.WithContext(ctx)) +} + +// DeleteRegistry operation middleware +func (siw *ServerInterfaceWrapper) DeleteRegistry(w http.ResponseWriter, r *http.Request) { + ctx := r.Context() + + var err error + + // ------------- Path parameter "registry_ref" ------------- + var registryRef RegistryRefPathParam + + err = runtime.BindStyledParameterWithOptions("simple", "registry_ref", chi.URLParam(r, "registry_ref"), ®istryRef, runtime.BindStyledParameterOptions{ParamLocation: runtime.ParamLocationPath, Explode: false, Required: true}) + if err != nil { + siw.ErrorHandlerFunc(w, r, &InvalidParamFormatError{ParamName: "registry_ref", Err: err}) + return + } + + handler := http.Handler(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { + siw.Handler.DeleteRegistry(w, r, registryRef) + })) + + for _, middleware := range siw.HandlerMiddlewares { + handler = middleware(handler) + } + + handler.ServeHTTP(w, r.WithContext(ctx)) +} + +// GetRegistry operation middleware +func (siw *ServerInterfaceWrapper) GetRegistry(w http.ResponseWriter, r *http.Request) { + ctx := r.Context() + + var err error + + // ------------- Path parameter "registry_ref" ------------- + var registryRef RegistryRefPathParam + + err = runtime.BindStyledParameterWithOptions("simple", "registry_ref", chi.URLParam(r, "registry_ref"), ®istryRef, runtime.BindStyledParameterOptions{ParamLocation: runtime.ParamLocationPath, Explode: false, Required: true}) + if err != nil { + siw.ErrorHandlerFunc(w, r, &InvalidParamFormatError{ParamName: "registry_ref", Err: err}) + return + } + + handler := http.Handler(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { + siw.Handler.GetRegistry(w, r, registryRef) + })) + + for _, middleware := range siw.HandlerMiddlewares { + handler = middleware(handler) + } + + handler.ServeHTTP(w, r.WithContext(ctx)) +} + +// ModifyRegistry operation middleware +func (siw *ServerInterfaceWrapper) ModifyRegistry(w http.ResponseWriter, r *http.Request) { + ctx := r.Context() + + var err error + + // ------------- Path parameter "registry_ref" ------------- + var registryRef RegistryRefPathParam + + err = runtime.BindStyledParameterWithOptions("simple", "registry_ref", chi.URLParam(r, "registry_ref"), ®istryRef, runtime.BindStyledParameterOptions{ParamLocation: runtime.ParamLocationPath, Explode: false, Required: true}) + if err != nil { + siw.ErrorHandlerFunc(w, r, &InvalidParamFormatError{ParamName: "registry_ref", Err: err}) + return + } + + handler := http.Handler(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { + siw.Handler.ModifyRegistry(w, r, registryRef) + })) + + for _, middleware := range siw.HandlerMiddlewares { + handler = middleware(handler) + } + + handler.ServeHTTP(w, r.WithContext(ctx)) +} + +// ListArtifactLabels operation middleware +func (siw *ServerInterfaceWrapper) ListArtifactLabels(w http.ResponseWriter, r *http.Request) { + ctx := 
r.Context() + + var err error + + // ------------- Path parameter "registry_ref" ------------- + var registryRef RegistryRefPathParam + + err = runtime.BindStyledParameterWithOptions("simple", "registry_ref", chi.URLParam(r, "registry_ref"), ®istryRef, runtime.BindStyledParameterOptions{ParamLocation: runtime.ParamLocationPath, Explode: false, Required: true}) + if err != nil { + siw.ErrorHandlerFunc(w, r, &InvalidParamFormatError{ParamName: "registry_ref", Err: err}) + return + } + + // Parameter object where we will unmarshal all parameters from the context + var params ListArtifactLabelsParams + + // ------------- Optional query parameter "page" ------------- + + err = runtime.BindQueryParameter("form", true, false, "page", r.URL.Query(), ¶ms.Page) + if err != nil { + siw.ErrorHandlerFunc(w, r, &InvalidParamFormatError{ParamName: "page", Err: err}) + return + } + + // ------------- Optional query parameter "size" ------------- + + err = runtime.BindQueryParameter("form", true, false, "size", r.URL.Query(), ¶ms.Size) + if err != nil { + siw.ErrorHandlerFunc(w, r, &InvalidParamFormatError{ParamName: "size", Err: err}) + return + } + + // ------------- Optional query parameter "search_term" ------------- + + err = runtime.BindQueryParameter("form", true, false, "search_term", r.URL.Query(), ¶ms.SearchTerm) + if err != nil { + siw.ErrorHandlerFunc(w, r, &InvalidParamFormatError{ParamName: "search_term", Err: err}) + return + } + + handler := http.Handler(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { + siw.Handler.ListArtifactLabels(w, r, registryRef, params) + })) + + for _, middleware := range siw.HandlerMiddlewares { + handler = middleware(handler) + } + + handler.ServeHTTP(w, r.WithContext(ctx)) +} + +// GetArtifactStatsForRegistry operation middleware +func (siw *ServerInterfaceWrapper) GetArtifactStatsForRegistry(w http.ResponseWriter, r *http.Request) { + ctx := r.Context() + + var err error + + // ------------- Path parameter "registry_ref" ------------- + var registryRef RegistryRefPathParam + + err = runtime.BindStyledParameterWithOptions("simple", "registry_ref", chi.URLParam(r, "registry_ref"), ®istryRef, runtime.BindStyledParameterOptions{ParamLocation: runtime.ParamLocationPath, Explode: false, Required: true}) + if err != nil { + siw.ErrorHandlerFunc(w, r, &InvalidParamFormatError{ParamName: "registry_ref", Err: err}) + return + } + + // Parameter object where we will unmarshal all parameters from the context + var params GetArtifactStatsForRegistryParams + + // ------------- Optional query parameter "from" ------------- + + err = runtime.BindQueryParameter("form", true, false, "from", r.URL.Query(), ¶ms.From) + if err != nil { + siw.ErrorHandlerFunc(w, r, &InvalidParamFormatError{ParamName: "from", Err: err}) + return + } + + // ------------- Optional query parameter "to" ------------- + + err = runtime.BindQueryParameter("form", true, false, "to", r.URL.Query(), ¶ms.To) + if err != nil { + siw.ErrorHandlerFunc(w, r, &InvalidParamFormatError{ParamName: "to", Err: err}) + return + } + + handler := http.Handler(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { + siw.Handler.GetArtifactStatsForRegistry(w, r, registryRef, params) + })) + + for _, middleware := range siw.HandlerMiddlewares { + handler = middleware(handler) + } + + handler.ServeHTTP(w, r.WithContext(ctx)) +} + +// UpdateArtifactLabels operation middleware +func (siw *ServerInterfaceWrapper) UpdateArtifactLabels(w http.ResponseWriter, r *http.Request) { + ctx := r.Context() + + var 
err error + + // ------------- Path parameter "registry_ref" ------------- + var registryRef RegistryRefPathParam + + err = runtime.BindStyledParameterWithOptions("simple", "registry_ref", chi.URLParam(r, "registry_ref"), ®istryRef, runtime.BindStyledParameterOptions{ParamLocation: runtime.ParamLocationPath, Explode: false, Required: true}) + if err != nil { + siw.ErrorHandlerFunc(w, r, &InvalidParamFormatError{ParamName: "registry_ref", Err: err}) + return + } + + // ------------- Path parameter "artifact" ------------- + var artifact ArtifactPathParam + + err = runtime.BindStyledParameterWithOptions("simple", "artifact", chi.URLParam(r, "artifact"), &artifact, runtime.BindStyledParameterOptions{ParamLocation: runtime.ParamLocationPath, Explode: false, Required: true}) + if err != nil { + siw.ErrorHandlerFunc(w, r, &InvalidParamFormatError{ParamName: "artifact", Err: err}) + return + } + + handler := http.Handler(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { + siw.Handler.UpdateArtifactLabels(w, r, registryRef, artifact) + })) + + for _, middleware := range siw.HandlerMiddlewares { + handler = middleware(handler) + } + + handler.ServeHTTP(w, r.WithContext(ctx)) +} + +// GetArtifactStats operation middleware +func (siw *ServerInterfaceWrapper) GetArtifactStats(w http.ResponseWriter, r *http.Request) { + ctx := r.Context() + + var err error + + // ------------- Path parameter "registry_ref" ------------- + var registryRef RegistryRefPathParam + + err = runtime.BindStyledParameterWithOptions("simple", "registry_ref", chi.URLParam(r, "registry_ref"), ®istryRef, runtime.BindStyledParameterOptions{ParamLocation: runtime.ParamLocationPath, Explode: false, Required: true}) + if err != nil { + siw.ErrorHandlerFunc(w, r, &InvalidParamFormatError{ParamName: "registry_ref", Err: err}) + return + } + + // ------------- Path parameter "artifact" ------------- + var artifact ArtifactPathParam + + err = runtime.BindStyledParameterWithOptions("simple", "artifact", chi.URLParam(r, "artifact"), &artifact, runtime.BindStyledParameterOptions{ParamLocation: runtime.ParamLocationPath, Explode: false, Required: true}) + if err != nil { + siw.ErrorHandlerFunc(w, r, &InvalidParamFormatError{ParamName: "artifact", Err: err}) + return + } + + // Parameter object where we will unmarshal all parameters from the context + var params GetArtifactStatsParams + + // ------------- Optional query parameter "from" ------------- + + err = runtime.BindQueryParameter("form", true, false, "from", r.URL.Query(), ¶ms.From) + if err != nil { + siw.ErrorHandlerFunc(w, r, &InvalidParamFormatError{ParamName: "from", Err: err}) + return + } + + // ------------- Optional query parameter "to" ------------- + + err = runtime.BindQueryParameter("form", true, false, "to", r.URL.Query(), ¶ms.To) + if err != nil { + siw.ErrorHandlerFunc(w, r, &InvalidParamFormatError{ParamName: "to", Err: err}) + return + } + + handler := http.Handler(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { + siw.Handler.GetArtifactStats(w, r, registryRef, artifact, params) + })) + + for _, middleware := range siw.HandlerMiddlewares { + handler = middleware(handler) + } + + handler.ServeHTTP(w, r.WithContext(ctx)) +} + +// GetArtifactSummary operation middleware +func (siw *ServerInterfaceWrapper) GetArtifactSummary(w http.ResponseWriter, r *http.Request) { + ctx := r.Context() + + var err error + + // ------------- Path parameter "registry_ref" ------------- + var registryRef RegistryRefPathParam + + err = 
runtime.BindStyledParameterWithOptions("simple", "registry_ref", chi.URLParam(r, "registry_ref"), ®istryRef, runtime.BindStyledParameterOptions{ParamLocation: runtime.ParamLocationPath, Explode: false, Required: true}) + if err != nil { + siw.ErrorHandlerFunc(w, r, &InvalidParamFormatError{ParamName: "registry_ref", Err: err}) + return + } + + // ------------- Path parameter "artifact" ------------- + var artifact ArtifactPathParam + + err = runtime.BindStyledParameterWithOptions("simple", "artifact", chi.URLParam(r, "artifact"), &artifact, runtime.BindStyledParameterOptions{ParamLocation: runtime.ParamLocationPath, Explode: false, Required: true}) + if err != nil { + siw.ErrorHandlerFunc(w, r, &InvalidParamFormatError{ParamName: "artifact", Err: err}) + return + } + + handler := http.Handler(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { + siw.Handler.GetArtifactSummary(w, r, registryRef, artifact) + })) + + for _, middleware := range siw.HandlerMiddlewares { + handler = middleware(handler) + } + + handler.ServeHTTP(w, r.WithContext(ctx)) +} + +// GetDockerArtifactDetails operation middleware +func (siw *ServerInterfaceWrapper) GetDockerArtifactDetails(w http.ResponseWriter, r *http.Request) { + ctx := r.Context() + + var err error + + // ------------- Path parameter "registry_ref" ------------- + var registryRef RegistryRefPathParam + + err = runtime.BindStyledParameterWithOptions("simple", "registry_ref", chi.URLParam(r, "registry_ref"), ®istryRef, runtime.BindStyledParameterOptions{ParamLocation: runtime.ParamLocationPath, Explode: false, Required: true}) + if err != nil { + siw.ErrorHandlerFunc(w, r, &InvalidParamFormatError{ParamName: "registry_ref", Err: err}) + return + } + + // ------------- Path parameter "artifact" ------------- + var artifact ArtifactPathParam + + err = runtime.BindStyledParameterWithOptions("simple", "artifact", chi.URLParam(r, "artifact"), &artifact, runtime.BindStyledParameterOptions{ParamLocation: runtime.ParamLocationPath, Explode: false, Required: true}) + if err != nil { + siw.ErrorHandlerFunc(w, r, &InvalidParamFormatError{ParamName: "artifact", Err: err}) + return + } + + // ------------- Path parameter "version" ------------- + var version VersionPathParam + + err = runtime.BindStyledParameterWithOptions("simple", "version", chi.URLParam(r, "version"), &version, runtime.BindStyledParameterOptions{ParamLocation: runtime.ParamLocationPath, Explode: false, Required: true}) + if err != nil { + siw.ErrorHandlerFunc(w, r, &InvalidParamFormatError{ParamName: "version", Err: err}) + return + } + + // Parameter object where we will unmarshal all parameters from the context + var params GetDockerArtifactDetailsParams + + // ------------- Required query parameter "digest" ------------- + + if paramValue := r.URL.Query().Get("digest"); paramValue != "" { + + } else { + siw.ErrorHandlerFunc(w, r, &RequiredParamError{ParamName: "digest"}) + return + } + + err = runtime.BindQueryParameter("form", true, true, "digest", r.URL.Query(), ¶ms.Digest) + if err != nil { + siw.ErrorHandlerFunc(w, r, &InvalidParamFormatError{ParamName: "digest", Err: err}) + return + } + + handler := http.Handler(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { + siw.Handler.GetDockerArtifactDetails(w, r, registryRef, artifact, version, params) + })) + + for _, middleware := range siw.HandlerMiddlewares { + handler = middleware(handler) + } + + handler.ServeHTTP(w, r.WithContext(ctx)) +} + +// GetDockerArtifactLayers operation middleware +func (siw 
*ServerInterfaceWrapper) GetDockerArtifactLayers(w http.ResponseWriter, r *http.Request) { + ctx := r.Context() + + var err error + + // ------------- Path parameter "registry_ref" ------------- + var registryRef RegistryRefPathParam + + err = runtime.BindStyledParameterWithOptions("simple", "registry_ref", chi.URLParam(r, "registry_ref"), ®istryRef, runtime.BindStyledParameterOptions{ParamLocation: runtime.ParamLocationPath, Explode: false, Required: true}) + if err != nil { + siw.ErrorHandlerFunc(w, r, &InvalidParamFormatError{ParamName: "registry_ref", Err: err}) + return + } + + // ------------- Path parameter "artifact" ------------- + var artifact ArtifactPathParam + + err = runtime.BindStyledParameterWithOptions("simple", "artifact", chi.URLParam(r, "artifact"), &artifact, runtime.BindStyledParameterOptions{ParamLocation: runtime.ParamLocationPath, Explode: false, Required: true}) + if err != nil { + siw.ErrorHandlerFunc(w, r, &InvalidParamFormatError{ParamName: "artifact", Err: err}) + return + } + + // ------------- Path parameter "version" ------------- + var version VersionPathParam + + err = runtime.BindStyledParameterWithOptions("simple", "version", chi.URLParam(r, "version"), &version, runtime.BindStyledParameterOptions{ParamLocation: runtime.ParamLocationPath, Explode: false, Required: true}) + if err != nil { + siw.ErrorHandlerFunc(w, r, &InvalidParamFormatError{ParamName: "version", Err: err}) + return + } + + // Parameter object where we will unmarshal all parameters from the context + var params GetDockerArtifactLayersParams + + // ------------- Required query parameter "digest" ------------- + + if paramValue := r.URL.Query().Get("digest"); paramValue != "" { + + } else { + siw.ErrorHandlerFunc(w, r, &RequiredParamError{ParamName: "digest"}) + return + } + + err = runtime.BindQueryParameter("form", true, true, "digest", r.URL.Query(), ¶ms.Digest) + if err != nil { + siw.ErrorHandlerFunc(w, r, &InvalidParamFormatError{ParamName: "digest", Err: err}) + return + } + + handler := http.Handler(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { + siw.Handler.GetDockerArtifactLayers(w, r, registryRef, artifact, version, params) + })) + + for _, middleware := range siw.HandlerMiddlewares { + handler = middleware(handler) + } + + handler.ServeHTTP(w, r.WithContext(ctx)) +} + +// GetDockerArtifactManifest operation middleware +func (siw *ServerInterfaceWrapper) GetDockerArtifactManifest(w http.ResponseWriter, r *http.Request) { + ctx := r.Context() + + var err error + + // ------------- Path parameter "registry_ref" ------------- + var registryRef RegistryRefPathParam + + err = runtime.BindStyledParameterWithOptions("simple", "registry_ref", chi.URLParam(r, "registry_ref"), ®istryRef, runtime.BindStyledParameterOptions{ParamLocation: runtime.ParamLocationPath, Explode: false, Required: true}) + if err != nil { + siw.ErrorHandlerFunc(w, r, &InvalidParamFormatError{ParamName: "registry_ref", Err: err}) + return + } + + // ------------- Path parameter "artifact" ------------- + var artifact ArtifactPathParam + + err = runtime.BindStyledParameterWithOptions("simple", "artifact", chi.URLParam(r, "artifact"), &artifact, runtime.BindStyledParameterOptions{ParamLocation: runtime.ParamLocationPath, Explode: false, Required: true}) + if err != nil { + siw.ErrorHandlerFunc(w, r, &InvalidParamFormatError{ParamName: "artifact", Err: err}) + return + } + + // ------------- Path parameter "version" ------------- + var version VersionPathParam + + err = 
runtime.BindStyledParameterWithOptions("simple", "version", chi.URLParam(r, "version"), &version, runtime.BindStyledParameterOptions{ParamLocation: runtime.ParamLocationPath, Explode: false, Required: true}) + if err != nil { + siw.ErrorHandlerFunc(w, r, &InvalidParamFormatError{ParamName: "version", Err: err}) + return + } + + // Parameter object where we will unmarshal all parameters from the context + var params GetDockerArtifactManifestParams + + // ------------- Required query parameter "digest" ------------- + + if paramValue := r.URL.Query().Get("digest"); paramValue != "" { + + } else { + siw.ErrorHandlerFunc(w, r, &RequiredParamError{ParamName: "digest"}) + return + } + + err = runtime.BindQueryParameter("form", true, true, "digest", r.URL.Query(), ¶ms.Digest) + if err != nil { + siw.ErrorHandlerFunc(w, r, &InvalidParamFormatError{ParamName: "digest", Err: err}) + return + } + + handler := http.Handler(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { + siw.Handler.GetDockerArtifactManifest(w, r, registryRef, artifact, version, params) + })) + + for _, middleware := range siw.HandlerMiddlewares { + handler = middleware(handler) + } + + handler.ServeHTTP(w, r.WithContext(ctx)) +} + +// GetDockerArtifactManifests operation middleware +func (siw *ServerInterfaceWrapper) GetDockerArtifactManifests(w http.ResponseWriter, r *http.Request) { + ctx := r.Context() + + var err error + + // ------------- Path parameter "registry_ref" ------------- + var registryRef RegistryRefPathParam + + err = runtime.BindStyledParameterWithOptions("simple", "registry_ref", chi.URLParam(r, "registry_ref"), ®istryRef, runtime.BindStyledParameterOptions{ParamLocation: runtime.ParamLocationPath, Explode: false, Required: true}) + if err != nil { + siw.ErrorHandlerFunc(w, r, &InvalidParamFormatError{ParamName: "registry_ref", Err: err}) + return + } + + // ------------- Path parameter "artifact" ------------- + var artifact ArtifactPathParam + + err = runtime.BindStyledParameterWithOptions("simple", "artifact", chi.URLParam(r, "artifact"), &artifact, runtime.BindStyledParameterOptions{ParamLocation: runtime.ParamLocationPath, Explode: false, Required: true}) + if err != nil { + siw.ErrorHandlerFunc(w, r, &InvalidParamFormatError{ParamName: "artifact", Err: err}) + return + } + + // ------------- Path parameter "version" ------------- + var version VersionPathParam + + err = runtime.BindStyledParameterWithOptions("simple", "version", chi.URLParam(r, "version"), &version, runtime.BindStyledParameterOptions{ParamLocation: runtime.ParamLocationPath, Explode: false, Required: true}) + if err != nil { + siw.ErrorHandlerFunc(w, r, &InvalidParamFormatError{ParamName: "version", Err: err}) + return + } + + handler := http.Handler(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { + siw.Handler.GetDockerArtifactManifests(w, r, registryRef, artifact, version) + })) + + for _, middleware := range siw.HandlerMiddlewares { + handler = middleware(handler) + } + + handler.ServeHTTP(w, r.WithContext(ctx)) +} + +// GetHelmArtifactDetails operation middleware +func (siw *ServerInterfaceWrapper) GetHelmArtifactDetails(w http.ResponseWriter, r *http.Request) { + ctx := r.Context() + + var err error + + // ------------- Path parameter "registry_ref" ------------- + var registryRef RegistryRefPathParam + + err = runtime.BindStyledParameterWithOptions("simple", "registry_ref", chi.URLParam(r, "registry_ref"), ®istryRef, runtime.BindStyledParameterOptions{ParamLocation: runtime.ParamLocationPath, Explode: 
false, Required: true}) + if err != nil { + siw.ErrorHandlerFunc(w, r, &InvalidParamFormatError{ParamName: "registry_ref", Err: err}) + return + } + + // ------------- Path parameter "artifact" ------------- + var artifact ArtifactPathParam + + err = runtime.BindStyledParameterWithOptions("simple", "artifact", chi.URLParam(r, "artifact"), &artifact, runtime.BindStyledParameterOptions{ParamLocation: runtime.ParamLocationPath, Explode: false, Required: true}) + if err != nil { + siw.ErrorHandlerFunc(w, r, &InvalidParamFormatError{ParamName: "artifact", Err: err}) + return + } + + // ------------- Path parameter "version" ------------- + var version VersionPathParam + + err = runtime.BindStyledParameterWithOptions("simple", "version", chi.URLParam(r, "version"), &version, runtime.BindStyledParameterOptions{ParamLocation: runtime.ParamLocationPath, Explode: false, Required: true}) + if err != nil { + siw.ErrorHandlerFunc(w, r, &InvalidParamFormatError{ParamName: "version", Err: err}) + return + } + + handler := http.Handler(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { + siw.Handler.GetHelmArtifactDetails(w, r, registryRef, artifact, version) + })) + + for _, middleware := range siw.HandlerMiddlewares { + handler = middleware(handler) + } + + handler.ServeHTTP(w, r.WithContext(ctx)) +} + +// GetHelmArtifactManifest operation middleware +func (siw *ServerInterfaceWrapper) GetHelmArtifactManifest(w http.ResponseWriter, r *http.Request) { + ctx := r.Context() + + var err error + + // ------------- Path parameter "registry_ref" ------------- + var registryRef RegistryRefPathParam + + err = runtime.BindStyledParameterWithOptions("simple", "registry_ref", chi.URLParam(r, "registry_ref"), ®istryRef, runtime.BindStyledParameterOptions{ParamLocation: runtime.ParamLocationPath, Explode: false, Required: true}) + if err != nil { + siw.ErrorHandlerFunc(w, r, &InvalidParamFormatError{ParamName: "registry_ref", Err: err}) + return + } + + // ------------- Path parameter "artifact" ------------- + var artifact ArtifactPathParam + + err = runtime.BindStyledParameterWithOptions("simple", "artifact", chi.URLParam(r, "artifact"), &artifact, runtime.BindStyledParameterOptions{ParamLocation: runtime.ParamLocationPath, Explode: false, Required: true}) + if err != nil { + siw.ErrorHandlerFunc(w, r, &InvalidParamFormatError{ParamName: "artifact", Err: err}) + return + } + + // ------------- Path parameter "version" ------------- + var version VersionPathParam + + err = runtime.BindStyledParameterWithOptions("simple", "version", chi.URLParam(r, "version"), &version, runtime.BindStyledParameterOptions{ParamLocation: runtime.ParamLocationPath, Explode: false, Required: true}) + if err != nil { + siw.ErrorHandlerFunc(w, r, &InvalidParamFormatError{ParamName: "version", Err: err}) + return + } + + handler := http.Handler(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { + siw.Handler.GetHelmArtifactManifest(w, r, registryRef, artifact, version) + })) + + for _, middleware := range siw.HandlerMiddlewares { + handler = middleware(handler) + } + + handler.ServeHTTP(w, r.WithContext(ctx)) +} + +// GetArtifactVersionSummary operation middleware +func (siw *ServerInterfaceWrapper) GetArtifactVersionSummary(w http.ResponseWriter, r *http.Request) { + ctx := r.Context() + + var err error + + // ------------- Path parameter "registry_ref" ------------- + var registryRef RegistryRefPathParam + + err = runtime.BindStyledParameterWithOptions("simple", "registry_ref", chi.URLParam(r, "registry_ref"), 
®istryRef, runtime.BindStyledParameterOptions{ParamLocation: runtime.ParamLocationPath, Explode: false, Required: true}) + if err != nil { + siw.ErrorHandlerFunc(w, r, &InvalidParamFormatError{ParamName: "registry_ref", Err: err}) + return + } + + // ------------- Path parameter "artifact" ------------- + var artifact ArtifactPathParam + + err = runtime.BindStyledParameterWithOptions("simple", "artifact", chi.URLParam(r, "artifact"), &artifact, runtime.BindStyledParameterOptions{ParamLocation: runtime.ParamLocationPath, Explode: false, Required: true}) + if err != nil { + siw.ErrorHandlerFunc(w, r, &InvalidParamFormatError{ParamName: "artifact", Err: err}) + return + } + + // ------------- Path parameter "version" ------------- + var version VersionPathParam + + err = runtime.BindStyledParameterWithOptions("simple", "version", chi.URLParam(r, "version"), &version, runtime.BindStyledParameterOptions{ParamLocation: runtime.ParamLocationPath, Explode: false, Required: true}) + if err != nil { + siw.ErrorHandlerFunc(w, r, &InvalidParamFormatError{ParamName: "version", Err: err}) + return + } + + handler := http.Handler(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { + siw.Handler.GetArtifactVersionSummary(w, r, registryRef, artifact, version) + })) + + for _, middleware := range siw.HandlerMiddlewares { + handler = middleware(handler) + } + + handler.ServeHTTP(w, r.WithContext(ctx)) +} + +// GetAllArtifactVersions operation middleware +func (siw *ServerInterfaceWrapper) GetAllArtifactVersions(w http.ResponseWriter, r *http.Request) { + ctx := r.Context() + + var err error + + // ------------- Path parameter "registry_ref" ------------- + var registryRef RegistryRefPathParam + + err = runtime.BindStyledParameterWithOptions("simple", "registry_ref", chi.URLParam(r, "registry_ref"), ®istryRef, runtime.BindStyledParameterOptions{ParamLocation: runtime.ParamLocationPath, Explode: false, Required: true}) + if err != nil { + siw.ErrorHandlerFunc(w, r, &InvalidParamFormatError{ParamName: "registry_ref", Err: err}) + return + } + + // ------------- Path parameter "artifact" ------------- + var artifact ArtifactPathParam + + err = runtime.BindStyledParameterWithOptions("simple", "artifact", chi.URLParam(r, "artifact"), &artifact, runtime.BindStyledParameterOptions{ParamLocation: runtime.ParamLocationPath, Explode: false, Required: true}) + if err != nil { + siw.ErrorHandlerFunc(w, r, &InvalidParamFormatError{ParamName: "artifact", Err: err}) + return + } + + // Parameter object where we will unmarshal all parameters from the context + var params GetAllArtifactVersionsParams + + // ------------- Optional query parameter "page" ------------- + + err = runtime.BindQueryParameter("form", true, false, "page", r.URL.Query(), ¶ms.Page) + if err != nil { + siw.ErrorHandlerFunc(w, r, &InvalidParamFormatError{ParamName: "page", Err: err}) + return + } + + // ------------- Optional query parameter "size" ------------- + + err = runtime.BindQueryParameter("form", true, false, "size", r.URL.Query(), ¶ms.Size) + if err != nil { + siw.ErrorHandlerFunc(w, r, &InvalidParamFormatError{ParamName: "size", Err: err}) + return + } + + // ------------- Optional query parameter "sort_order" ------------- + + err = runtime.BindQueryParameter("form", true, false, "sort_order", r.URL.Query(), ¶ms.SortOrder) + if err != nil { + siw.ErrorHandlerFunc(w, r, &InvalidParamFormatError{ParamName: "sort_order", Err: err}) + return + } + + // ------------- Optional query parameter "sort_field" ------------- + + err = 
runtime.BindQueryParameter("form", true, false, "sort_field", r.URL.Query(), ¶ms.SortField) + if err != nil { + siw.ErrorHandlerFunc(w, r, &InvalidParamFormatError{ParamName: "sort_field", Err: err}) + return + } + + // ------------- Optional query parameter "search_term" ------------- + + err = runtime.BindQueryParameter("form", true, false, "search_term", r.URL.Query(), ¶ms.SearchTerm) + if err != nil { + siw.ErrorHandlerFunc(w, r, &InvalidParamFormatError{ParamName: "search_term", Err: err}) + return + } + + handler := http.Handler(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { + siw.Handler.GetAllArtifactVersions(w, r, registryRef, artifact, params) + })) + + for _, middleware := range siw.HandlerMiddlewares { + handler = middleware(handler) + } + + handler.ServeHTTP(w, r.WithContext(ctx)) +} + +// GetClientSetupDetails operation middleware +func (siw *ServerInterfaceWrapper) GetClientSetupDetails(w http.ResponseWriter, r *http.Request) { + ctx := r.Context() + + var err error + + // ------------- Path parameter "registry_ref" ------------- + var registryRef RegistryRefPathParam + + err = runtime.BindStyledParameterWithOptions("simple", "registry_ref", chi.URLParam(r, "registry_ref"), ®istryRef, runtime.BindStyledParameterOptions{ParamLocation: runtime.ParamLocationPath, Explode: false, Required: true}) + if err != nil { + siw.ErrorHandlerFunc(w, r, &InvalidParamFormatError{ParamName: "registry_ref", Err: err}) + return + } + + // Parameter object where we will unmarshal all parameters from the context + var params GetClientSetupDetailsParams + + // ------------- Optional query parameter "artifact" ------------- + + err = runtime.BindQueryParameter("form", true, false, "artifact", r.URL.Query(), ¶ms.Artifact) + if err != nil { + siw.ErrorHandlerFunc(w, r, &InvalidParamFormatError{ParamName: "artifact", Err: err}) + return + } + + // ------------- Optional query parameter "version" ------------- + + err = runtime.BindQueryParameter("form", true, false, "version", r.URL.Query(), ¶ms.Version) + if err != nil { + siw.ErrorHandlerFunc(w, r, &InvalidParamFormatError{ParamName: "version", Err: err}) + return + } + + handler := http.Handler(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { + siw.Handler.GetClientSetupDetails(w, r, registryRef, params) + })) + + for _, middleware := range siw.HandlerMiddlewares { + handler = middleware(handler) + } + + handler.ServeHTTP(w, r.WithContext(ctx)) +} + +// GetArtifactStatsForSpace operation middleware +func (siw *ServerInterfaceWrapper) GetArtifactStatsForSpace(w http.ResponseWriter, r *http.Request) { + ctx := r.Context() + + var err error + + // ------------- Path parameter "space_ref" ------------- + var spaceRef SpaceRefPathParam + + err = runtime.BindStyledParameterWithOptions("simple", "space_ref", chi.URLParam(r, "space_ref"), &spaceRef, runtime.BindStyledParameterOptions{ParamLocation: runtime.ParamLocationPath, Explode: false, Required: true}) + if err != nil { + siw.ErrorHandlerFunc(w, r, &InvalidParamFormatError{ParamName: "space_ref", Err: err}) + return + } + + // Parameter object where we will unmarshal all parameters from the context + var params GetArtifactStatsForSpaceParams + + // ------------- Optional query parameter "from" ------------- + + err = runtime.BindQueryParameter("form", true, false, "from", r.URL.Query(), ¶ms.From) + if err != nil { + siw.ErrorHandlerFunc(w, r, &InvalidParamFormatError{ParamName: "from", Err: err}) + return + } + + // ------------- Optional query parameter "to" 
------------- + + err = runtime.BindQueryParameter("form", true, false, "to", r.URL.Query(), ¶ms.To) + if err != nil { + siw.ErrorHandlerFunc(w, r, &InvalidParamFormatError{ParamName: "to", Err: err}) + return + } + + handler := http.Handler(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { + siw.Handler.GetArtifactStatsForSpace(w, r, spaceRef, params) + })) + + for _, middleware := range siw.HandlerMiddlewares { + handler = middleware(handler) + } + + handler.ServeHTTP(w, r.WithContext(ctx)) +} + +// GetAllArtifacts operation middleware +func (siw *ServerInterfaceWrapper) GetAllArtifacts(w http.ResponseWriter, r *http.Request) { + ctx := r.Context() + + var err error + + // ------------- Path parameter "space_ref" ------------- + var spaceRef SpaceRefPathParam + + err = runtime.BindStyledParameterWithOptions("simple", "space_ref", chi.URLParam(r, "space_ref"), &spaceRef, runtime.BindStyledParameterOptions{ParamLocation: runtime.ParamLocationPath, Explode: false, Required: true}) + if err != nil { + siw.ErrorHandlerFunc(w, r, &InvalidParamFormatError{ParamName: "space_ref", Err: err}) + return + } + + // Parameter object where we will unmarshal all parameters from the context + var params GetAllArtifactsParams + + // ------------- Optional query parameter "label" ------------- + + err = runtime.BindQueryParameter("form", true, false, "label", r.URL.Query(), ¶ms.Label) + if err != nil { + siw.ErrorHandlerFunc(w, r, &InvalidParamFormatError{ParamName: "label", Err: err}) + return + } + + // ------------- Optional query parameter "package_type" ------------- + + err = runtime.BindQueryParameter("form", true, false, "package_type", r.URL.Query(), ¶ms.PackageType) + if err != nil { + siw.ErrorHandlerFunc(w, r, &InvalidParamFormatError{ParamName: "package_type", Err: err}) + return + } + + // ------------- Optional query parameter "reg_identifier" ------------- + + err = runtime.BindQueryParameter("form", true, false, "reg_identifier", r.URL.Query(), ¶ms.RegIdentifier) + if err != nil { + siw.ErrorHandlerFunc(w, r, &InvalidParamFormatError{ParamName: "reg_identifier", Err: err}) + return + } + + // ------------- Optional query parameter "page" ------------- + + err = runtime.BindQueryParameter("form", true, false, "page", r.URL.Query(), ¶ms.Page) + if err != nil { + siw.ErrorHandlerFunc(w, r, &InvalidParamFormatError{ParamName: "page", Err: err}) + return + } + + // ------------- Optional query parameter "size" ------------- + + err = runtime.BindQueryParameter("form", true, false, "size", r.URL.Query(), ¶ms.Size) + if err != nil { + siw.ErrorHandlerFunc(w, r, &InvalidParamFormatError{ParamName: "size", Err: err}) + return + } + + // ------------- Optional query parameter "sort_order" ------------- + + err = runtime.BindQueryParameter("form", true, false, "sort_order", r.URL.Query(), ¶ms.SortOrder) + if err != nil { + siw.ErrorHandlerFunc(w, r, &InvalidParamFormatError{ParamName: "sort_order", Err: err}) + return + } + + // ------------- Optional query parameter "sort_field" ------------- + + err = runtime.BindQueryParameter("form", true, false, "sort_field", r.URL.Query(), ¶ms.SortField) + if err != nil { + siw.ErrorHandlerFunc(w, r, &InvalidParamFormatError{ParamName: "sort_field", Err: err}) + return + } + + // ------------- Optional query parameter "search_term" ------------- + + err = runtime.BindQueryParameter("form", true, false, "search_term", r.URL.Query(), ¶ms.SearchTerm) + if err != nil { + siw.ErrorHandlerFunc(w, r, &InvalidParamFormatError{ParamName: "search_term", 
Err: err}) + return + } + + handler := http.Handler(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { + siw.Handler.GetAllArtifacts(w, r, spaceRef, params) + })) + + for _, middleware := range siw.HandlerMiddlewares { + handler = middleware(handler) + } + + handler.ServeHTTP(w, r.WithContext(ctx)) +} + +// GetAllRegistries operation middleware +func (siw *ServerInterfaceWrapper) GetAllRegistries(w http.ResponseWriter, r *http.Request) { + ctx := r.Context() + + var err error + + // ------------- Path parameter "space_ref" ------------- + var spaceRef SpaceRefPathParam + + err = runtime.BindStyledParameterWithOptions("simple", "space_ref", chi.URLParam(r, "space_ref"), &spaceRef, runtime.BindStyledParameterOptions{ParamLocation: runtime.ParamLocationPath, Explode: false, Required: true}) + if err != nil { + siw.ErrorHandlerFunc(w, r, &InvalidParamFormatError{ParamName: "space_ref", Err: err}) + return + } + + // Parameter object where we will unmarshal all parameters from the context + var params GetAllRegistriesParams + + // ------------- Optional query parameter "package_type" ------------- + + err = runtime.BindQueryParameter("form", true, false, "package_type", r.URL.Query(), ¶ms.PackageType) + if err != nil { + siw.ErrorHandlerFunc(w, r, &InvalidParamFormatError{ParamName: "package_type", Err: err}) + return + } + + // ------------- Optional query parameter "type" ------------- + + err = runtime.BindQueryParameter("form", true, false, "type", r.URL.Query(), ¶ms.Type) + if err != nil { + siw.ErrorHandlerFunc(w, r, &InvalidParamFormatError{ParamName: "type", Err: err}) + return + } + + // ------------- Optional query parameter "page" ------------- + + err = runtime.BindQueryParameter("form", true, false, "page", r.URL.Query(), ¶ms.Page) + if err != nil { + siw.ErrorHandlerFunc(w, r, &InvalidParamFormatError{ParamName: "page", Err: err}) + return + } + + // ------------- Optional query parameter "size" ------------- + + err = runtime.BindQueryParameter("form", true, false, "size", r.URL.Query(), ¶ms.Size) + if err != nil { + siw.ErrorHandlerFunc(w, r, &InvalidParamFormatError{ParamName: "size", Err: err}) + return + } + + // ------------- Optional query parameter "sort_order" ------------- + + err = runtime.BindQueryParameter("form", true, false, "sort_order", r.URL.Query(), ¶ms.SortOrder) + if err != nil { + siw.ErrorHandlerFunc(w, r, &InvalidParamFormatError{ParamName: "sort_order", Err: err}) + return + } + + // ------------- Optional query parameter "sort_field" ------------- + + err = runtime.BindQueryParameter("form", true, false, "sort_field", r.URL.Query(), ¶ms.SortField) + if err != nil { + siw.ErrorHandlerFunc(w, r, &InvalidParamFormatError{ParamName: "sort_field", Err: err}) + return + } + + // ------------- Optional query parameter "search_term" ------------- + + err = runtime.BindQueryParameter("form", true, false, "search_term", r.URL.Query(), ¶ms.SearchTerm) + if err != nil { + siw.ErrorHandlerFunc(w, r, &InvalidParamFormatError{ParamName: "search_term", Err: err}) + return + } + + handler := http.Handler(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { + siw.Handler.GetAllRegistries(w, r, spaceRef, params) + })) + + for _, middleware := range siw.HandlerMiddlewares { + handler = middleware(handler) + } + + handler.ServeHTTP(w, r.WithContext(ctx)) +} + +type UnescapedCookieParamError struct { + ParamName string + Err error +} + +func (e *UnescapedCookieParamError) Error() string { + return fmt.Sprintf("error unescaping cookie parameter '%s'", 
e.ParamName) +} + +func (e *UnescapedCookieParamError) Unwrap() error { + return e.Err +} + +type UnmarshalingParamError struct { + ParamName string + Err error +} + +func (e *UnmarshalingParamError) Error() string { + return fmt.Sprintf("Error unmarshaling parameter %s as JSON: %s", e.ParamName, e.Err.Error()) +} + +func (e *UnmarshalingParamError) Unwrap() error { + return e.Err +} + +type RequiredParamError struct { + ParamName string +} + +func (e *RequiredParamError) Error() string { + return fmt.Sprintf("Query argument %s is required, but not found", e.ParamName) +} + +type RequiredHeaderError struct { + ParamName string + Err error +} + +func (e *RequiredHeaderError) Error() string { + return fmt.Sprintf("Header parameter %s is required, but not found", e.ParamName) +} + +func (e *RequiredHeaderError) Unwrap() error { + return e.Err +} + +type InvalidParamFormatError struct { + ParamName string + Err error +} + +func (e *InvalidParamFormatError) Error() string { + return fmt.Sprintf("Invalid format for parameter %s: %s", e.ParamName, e.Err.Error()) +} + +func (e *InvalidParamFormatError) Unwrap() error { + return e.Err +} + +type TooManyValuesForParamError struct { + ParamName string + Count int +} + +func (e *TooManyValuesForParamError) Error() string { + return fmt.Sprintf("Expected one value for %s, got %d", e.ParamName, e.Count) +} + +// Handler creates http.Handler with routing matching OpenAPI spec. +func Handler(si ServerInterface) http.Handler { + return HandlerWithOptions(si, ChiServerOptions{}) +} + +type ChiServerOptions struct { + BaseURL string + BaseRouter chi.Router + Middlewares []MiddlewareFunc + ErrorHandlerFunc func(w http.ResponseWriter, r *http.Request, err error) +} + +// HandlerFromMux creates http.Handler with routing matching OpenAPI spec based on the provided mux. 
+func HandlerFromMux(si ServerInterface, r chi.Router) http.Handler { + return HandlerWithOptions(si, ChiServerOptions{ + BaseRouter: r, + }) +} + +func HandlerFromMuxWithBaseURL(si ServerInterface, r chi.Router, baseURL string) http.Handler { + return HandlerWithOptions(si, ChiServerOptions{ + BaseURL: baseURL, + BaseRouter: r, + }) +} + +// HandlerWithOptions creates http.Handler with additional options +func HandlerWithOptions(si ServerInterface, options ChiServerOptions) http.Handler { + r := options.BaseRouter + + if r == nil { + r = chi.NewRouter() + } + if options.ErrorHandlerFunc == nil { + options.ErrorHandlerFunc = func(w http.ResponseWriter, r *http.Request, err error) { + http.Error(w, err.Error(), http.StatusBadRequest) + } + } + wrapper := ServerInterfaceWrapper{ + Handler: si, + HandlerMiddlewares: options.Middlewares, + ErrorHandlerFunc: options.ErrorHandlerFunc, + } + + r.Group(func(r chi.Router) { + r.Post(options.BaseURL+"/registry", wrapper.CreateRegistry) + }) + r.Group(func(r chi.Router) { + r.Delete(options.BaseURL+"/registry/{registry_ref}", wrapper.DeleteRegistry) + }) + r.Group(func(r chi.Router) { + r.Get(options.BaseURL+"/registry/{registry_ref}", wrapper.GetRegistry) + }) + r.Group(func(r chi.Router) { + r.Put(options.BaseURL+"/registry/{registry_ref}", wrapper.ModifyRegistry) + }) + r.Group(func(r chi.Router) { + r.Get(options.BaseURL+"/registry/{registry_ref}/artifact/labels", wrapper.ListArtifactLabels) + }) + r.Group(func(r chi.Router) { + r.Get(options.BaseURL+"/registry/{registry_ref}/artifact/stats", wrapper.GetArtifactStatsForRegistry) + }) + r.Group(func(r chi.Router) { + r.Put(options.BaseURL+"/registry/{registry_ref}/artifact/{artifact}/labels", wrapper.UpdateArtifactLabels) + }) + r.Group(func(r chi.Router) { + r.Get(options.BaseURL+"/registry/{registry_ref}/artifact/{artifact}/stats", wrapper.GetArtifactStats) + }) + r.Group(func(r chi.Router) { + r.Get(options.BaseURL+"/registry/{registry_ref}/artifact/{artifact}/summary", wrapper.GetArtifactSummary) + }) + r.Group(func(r chi.Router) { + r.Get(options.BaseURL+"/registry/{registry_ref}/artifact/{artifact}/version/{version}/docker/details", wrapper.GetDockerArtifactDetails) + }) + r.Group(func(r chi.Router) { + r.Get(options.BaseURL+"/registry/{registry_ref}/artifact/{artifact}/version/{version}/docker/layers", wrapper.GetDockerArtifactLayers) + }) + r.Group(func(r chi.Router) { + r.Get(options.BaseURL+"/registry/{registry_ref}/artifact/{artifact}/version/{version}/docker/manifest", wrapper.GetDockerArtifactManifest) + }) + r.Group(func(r chi.Router) { + r.Get(options.BaseURL+"/registry/{registry_ref}/artifact/{artifact}/version/{version}/docker/manifests", wrapper.GetDockerArtifactManifests) + }) + r.Group(func(r chi.Router) { + r.Get(options.BaseURL+"/registry/{registry_ref}/artifact/{artifact}/version/{version}/helm/details", wrapper.GetHelmArtifactDetails) + }) + r.Group(func(r chi.Router) { + r.Get(options.BaseURL+"/registry/{registry_ref}/artifact/{artifact}/version/{version}/helm/manifest", wrapper.GetHelmArtifactManifest) + }) + r.Group(func(r chi.Router) { + r.Get(options.BaseURL+"/registry/{registry_ref}/artifact/{artifact}/version/{version}/summary", wrapper.GetArtifactVersionSummary) + }) + r.Group(func(r chi.Router) { + r.Get(options.BaseURL+"/registry/{registry_ref}/artifact/{artifact}/versions", wrapper.GetAllArtifactVersions) + }) + r.Group(func(r chi.Router) { + r.Get(options.BaseURL+"/registry/{registry_ref}/client-setup-details", wrapper.GetClientSetupDetails) + }) + 
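+ // The groups below register the space-scoped listing endpoints under + // /spaces/{space_ref}; every route above is scoped to a single registry + // via the {registry_ref} path parameter.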
r.Group(func(r chi.Router) { + r.Get(options.BaseURL+"/spaces/{space_ref}/artifact/stats", wrapper.GetArtifactStatsForSpace) + }) + r.Group(func(r chi.Router) { + r.Get(options.BaseURL+"/spaces/{space_ref}/artifacts", wrapper.GetAllArtifacts) + }) + r.Group(func(r chi.Router) { + r.Get(options.BaseURL+"/spaces/{space_ref}/registries", wrapper.GetAllRegistries) + }) + + return r +} + +type ArtifactLabelResponseJSONResponse struct { + // Data Harness Artifact Summary + Data ArtifactSummary `json:"data"` + + // Status Indicates if the request was successful or not + Status Status `json:"status"` +} + +type ArtifactStatsResponseJSONResponse struct { + // Data Harness Artifact Stats + Data ArtifactStats `json:"data"` + + // Status Indicates if the request was successful or not + Status Status `json:"status"` +} + +type ArtifactSummaryResponseJSONResponse struct { + // Data Harness Artifact Summary + Data ArtifactSummary `json:"data"` + + // Status Indicates if the request was successful or not + Status Status `json:"status"` +} + +type ArtifactVersionSummaryResponseJSONResponse struct { + // Data Docker Artifact Version Summary + Data ArtifactVersionSummary `json:"data"` + + // Status Indicates if the request was successful or not + Status Status `json:"status"` +} + +type BadRequestJSONResponse Error + +type ClientSetupDetailsResponseJSONResponse struct { + // Data Client Setup Details + Data ClientSetupDetails `json:"data"` + + // Status Indicates if the request was successful or not + Status Status `json:"status"` +} + +type DockerArtifactDetailResponseJSONResponse struct { + // Data Docker Artifact Detail + Data DockerArtifactDetail `json:"data"` + + // Status Indicates if the request was successful or not + Status Status `json:"status"` +} + +type DockerArtifactManifestResponseJSONResponse struct { + // Data Docker Artifact Manifest + Data DockerArtifactManifest `json:"data"` + + // Status Indicates if the request was successful or not + Status Status `json:"status"` +} + +type DockerLayersResponseJSONResponse struct { + // Data Harness Layers Summary + Data DockerLayersSummary `json:"data"` + + // Status Indicates if the request was successful or not + Status Status `json:"status"` +} + +type DockerManifestsResponseJSONResponse struct { + // Data Harness Manifests + Data DockerManifests `json:"data"` + + // Status Indicates if the request was successful or not + Status Status `json:"status"` +} + +type HelmArtifactDetailResponseJSONResponse struct { + // Data Helm Artifact Detail + Data HelmArtifactDetail `json:"data"` + + // Status Indicates if the request was successful or not + Status Status `json:"status"` +} + +type HelmArtifactManifestResponseJSONResponse struct { + // Data Helm Artifact Manifest + Data HelmArtifactManifest `json:"data"` + + // Status Indicates if the request was successful or not + Status Status `json:"status"` +} + +type InternalServerErrorJSONResponse Error + +type ListArtifactLabelResponseJSONResponse struct { + // Data A list of Harness Artifact Labels + Data ListArtifactLabel `json:"data"` + + // Status Indicates if the request was successful or not + Status Status `json:"status"` +} + +type ListArtifactResponseJSONResponse struct { + // Data A list of Artifacts + Data ListArtifact `json:"data"` + + // Status Indicates if the request was successful or not + Status Status `json:"status"` +} + +type ListArtifactVersionResponseJSONResponse struct { + // Data A list of Artifact versions + Data ListArtifactVersion `json:"data"` + + // Status Indicates if the 
request was successful or not + Status Status `json:"status"` +} + +type ListRegistryResponseJSONResponse struct { + // Data A list of Harness Artifact Registries + Data ListRegistry `json:"data"` + + // Status Indicates if the request was successful or not + Status Status `json:"status"` +} + +type NotFoundJSONResponse Error + +type RegistryResponseJSONResponse struct { + // Data Harness Artifact Registry + Data Registry `json:"data"` + + // Status Indicates if the request was successful or not + Status Status `json:"status"` +} + +type SuccessJSONResponse struct { + // Status Indicates if the request was successful or not + Status Status `json:"status"` +} + +type UnauthenticatedJSONResponse Error + +type UnauthorizedJSONResponse Error + +type CreateRegistryRequestObject struct { + Body *CreateRegistryJSONRequestBody +} + +type CreateRegistryResponseObject interface { + VisitCreateRegistryResponse(w http.ResponseWriter) error +} + +type CreateRegistry201JSONResponse struct{ RegistryResponseJSONResponse } + +func (response CreateRegistry201JSONResponse) VisitCreateRegistryResponse(w http.ResponseWriter) error { + w.Header().Set("Content-Type", "application/json") + w.WriteHeader(201) + + return json.NewEncoder(w).Encode(response) +} + +type CreateRegistry400JSONResponse struct{ BadRequestJSONResponse } + +func (response CreateRegistry400JSONResponse) VisitCreateRegistryResponse(w http.ResponseWriter) error { + w.Header().Set("Content-Type", "application/json") + w.WriteHeader(400) + + return json.NewEncoder(w).Encode(response) +} + +type CreateRegistry401JSONResponse struct{ UnauthenticatedJSONResponse } + +func (response CreateRegistry401JSONResponse) VisitCreateRegistryResponse(w http.ResponseWriter) error { + w.Header().Set("Content-Type", "application/json") + w.WriteHeader(401) + + return json.NewEncoder(w).Encode(response) +} + +type CreateRegistry403JSONResponse struct{ UnauthorizedJSONResponse } + +func (response CreateRegistry403JSONResponse) VisitCreateRegistryResponse(w http.ResponseWriter) error { + w.Header().Set("Content-Type", "application/json") + w.WriteHeader(403) + + return json.NewEncoder(w).Encode(response) +} + +type CreateRegistry500JSONResponse struct { + InternalServerErrorJSONResponse +} + +func (response CreateRegistry500JSONResponse) VisitCreateRegistryResponse(w http.ResponseWriter) error { + w.Header().Set("Content-Type", "application/json") + w.WriteHeader(500) + + return json.NewEncoder(w).Encode(response) +} + +type DeleteRegistryRequestObject struct { + RegistryRef RegistryRefPathParam `json:"registry_ref"` +} + +type DeleteRegistryResponseObject interface { + VisitDeleteRegistryResponse(w http.ResponseWriter) error +} + +type DeleteRegistry200JSONResponse struct{ SuccessJSONResponse } + +func (response DeleteRegistry200JSONResponse) VisitDeleteRegistryResponse(w http.ResponseWriter) error { + w.Header().Set("Content-Type", "application/json") + w.WriteHeader(200) + + return json.NewEncoder(w).Encode(response) +} + +type DeleteRegistry400JSONResponse struct{ BadRequestJSONResponse } + +func (response DeleteRegistry400JSONResponse) VisitDeleteRegistryResponse(w http.ResponseWriter) error { + w.Header().Set("Content-Type", "application/json") + w.WriteHeader(400) + + return json.NewEncoder(w).Encode(response) +} + +type DeleteRegistry401JSONResponse struct{ UnauthenticatedJSONResponse } + +func (response DeleteRegistry401JSONResponse) VisitDeleteRegistryResponse(w http.ResponseWriter) error { + w.Header().Set("Content-Type", "application/json") + 
w.WriteHeader(401) + + return json.NewEncoder(w).Encode(response) +} + +type DeleteRegistry403JSONResponse struct{ UnauthorizedJSONResponse } + +func (response DeleteRegistry403JSONResponse) VisitDeleteRegistryResponse(w http.ResponseWriter) error { + w.Header().Set("Content-Type", "application/json") + w.WriteHeader(403) + + return json.NewEncoder(w).Encode(response) +} + +type DeleteRegistry404JSONResponse struct{ NotFoundJSONResponse } + +func (response DeleteRegistry404JSONResponse) VisitDeleteRegistryResponse(w http.ResponseWriter) error { + w.Header().Set("Content-Type", "application/json") + w.WriteHeader(404) + + return json.NewEncoder(w).Encode(response) +} + +type DeleteRegistry500JSONResponse struct { + InternalServerErrorJSONResponse +} + +func (response DeleteRegistry500JSONResponse) VisitDeleteRegistryResponse(w http.ResponseWriter) error { + w.Header().Set("Content-Type", "application/json") + w.WriteHeader(500) + + return json.NewEncoder(w).Encode(response) +} + +type GetRegistryRequestObject struct { + RegistryRef RegistryRefPathParam `json:"registry_ref"` +} + +type GetRegistryResponseObject interface { + VisitGetRegistryResponse(w http.ResponseWriter) error +} + +type GetRegistry200JSONResponse struct{ RegistryResponseJSONResponse } + +func (response GetRegistry200JSONResponse) VisitGetRegistryResponse(w http.ResponseWriter) error { + w.Header().Set("Content-Type", "application/json") + w.WriteHeader(200) + + return json.NewEncoder(w).Encode(response) +} + +type GetRegistry400JSONResponse struct{ BadRequestJSONResponse } + +func (response GetRegistry400JSONResponse) VisitGetRegistryResponse(w http.ResponseWriter) error { + w.Header().Set("Content-Type", "application/json") + w.WriteHeader(400) + + return json.NewEncoder(w).Encode(response) +} + +type GetRegistry401JSONResponse struct{ UnauthenticatedJSONResponse } + +func (response GetRegistry401JSONResponse) VisitGetRegistryResponse(w http.ResponseWriter) error { + w.Header().Set("Content-Type", "application/json") + w.WriteHeader(401) + + return json.NewEncoder(w).Encode(response) +} + +type GetRegistry403JSONResponse struct{ UnauthorizedJSONResponse } + +func (response GetRegistry403JSONResponse) VisitGetRegistryResponse(w http.ResponseWriter) error { + w.Header().Set("Content-Type", "application/json") + w.WriteHeader(403) + + return json.NewEncoder(w).Encode(response) +} + +type GetRegistry404JSONResponse struct{ NotFoundJSONResponse } + +func (response GetRegistry404JSONResponse) VisitGetRegistryResponse(w http.ResponseWriter) error { + w.Header().Set("Content-Type", "application/json") + w.WriteHeader(404) + + return json.NewEncoder(w).Encode(response) +} + +type GetRegistry500JSONResponse struct { + InternalServerErrorJSONResponse +} + +func (response GetRegistry500JSONResponse) VisitGetRegistryResponse(w http.ResponseWriter) error { + w.Header().Set("Content-Type", "application/json") + w.WriteHeader(500) + + return json.NewEncoder(w).Encode(response) +} + +type ModifyRegistryRequestObject struct { + RegistryRef RegistryRefPathParam `json:"registry_ref"` + Body *ModifyRegistryJSONRequestBody +} + +type ModifyRegistryResponseObject interface { + VisitModifyRegistryResponse(w http.ResponseWriter) error +} + +type ModifyRegistry200JSONResponse struct{ RegistryResponseJSONResponse } + +func (response ModifyRegistry200JSONResponse) VisitModifyRegistryResponse(w http.ResponseWriter) error { + w.Header().Set("Content-Type", "application/json") + w.WriteHeader(200) + + return json.NewEncoder(w).Encode(response) +} + 
+type ModifyRegistry400JSONResponse struct{ BadRequestJSONResponse } + +func (response ModifyRegistry400JSONResponse) VisitModifyRegistryResponse(w http.ResponseWriter) error { + w.Header().Set("Content-Type", "application/json") + w.WriteHeader(400) + + return json.NewEncoder(w).Encode(response) +} + +type ModifyRegistry401JSONResponse struct{ UnauthenticatedJSONResponse } + +func (response ModifyRegistry401JSONResponse) VisitModifyRegistryResponse(w http.ResponseWriter) error { + w.Header().Set("Content-Type", "application/json") + w.WriteHeader(401) + + return json.NewEncoder(w).Encode(response) +} + +type ModifyRegistry403JSONResponse struct{ UnauthorizedJSONResponse } + +func (response ModifyRegistry403JSONResponse) VisitModifyRegistryResponse(w http.ResponseWriter) error { + w.Header().Set("Content-Type", "application/json") + w.WriteHeader(403) + + return json.NewEncoder(w).Encode(response) +} + +type ModifyRegistry404JSONResponse struct{ NotFoundJSONResponse } + +func (response ModifyRegistry404JSONResponse) VisitModifyRegistryResponse(w http.ResponseWriter) error { + w.Header().Set("Content-Type", "application/json") + w.WriteHeader(404) + + return json.NewEncoder(w).Encode(response) +} + +type ModifyRegistry500JSONResponse struct { + InternalServerErrorJSONResponse +} + +func (response ModifyRegistry500JSONResponse) VisitModifyRegistryResponse(w http.ResponseWriter) error { + w.Header().Set("Content-Type", "application/json") + w.WriteHeader(500) + + return json.NewEncoder(w).Encode(response) +} + +type ListArtifactLabelsRequestObject struct { + RegistryRef RegistryRefPathParam `json:"registry_ref"` + Params ListArtifactLabelsParams +} + +type ListArtifactLabelsResponseObject interface { + VisitListArtifactLabelsResponse(w http.ResponseWriter) error +} + +type ListArtifactLabels200JSONResponse struct { + ListArtifactLabelResponseJSONResponse +} + +func (response ListArtifactLabels200JSONResponse) VisitListArtifactLabelsResponse(w http.ResponseWriter) error { + w.Header().Set("Content-Type", "application/json") + w.WriteHeader(200) + + return json.NewEncoder(w).Encode(response) +} + +type ListArtifactLabels400JSONResponse struct{ BadRequestJSONResponse } + +func (response ListArtifactLabels400JSONResponse) VisitListArtifactLabelsResponse(w http.ResponseWriter) error { + w.Header().Set("Content-Type", "application/json") + w.WriteHeader(400) + + return json.NewEncoder(w).Encode(response) +} + +type ListArtifactLabels401JSONResponse struct{ UnauthenticatedJSONResponse } + +func (response ListArtifactLabels401JSONResponse) VisitListArtifactLabelsResponse(w http.ResponseWriter) error { + w.Header().Set("Content-Type", "application/json") + w.WriteHeader(401) + + return json.NewEncoder(w).Encode(response) +} + +type ListArtifactLabels403JSONResponse struct{ UnauthorizedJSONResponse } + +func (response ListArtifactLabels403JSONResponse) VisitListArtifactLabelsResponse(w http.ResponseWriter) error { + w.Header().Set("Content-Type", "application/json") + w.WriteHeader(403) + + return json.NewEncoder(w).Encode(response) +} + +type ListArtifactLabels404JSONResponse struct{ NotFoundJSONResponse } + +func (response ListArtifactLabels404JSONResponse) VisitListArtifactLabelsResponse(w http.ResponseWriter) error { + w.Header().Set("Content-Type", "application/json") + w.WriteHeader(404) + + return json.NewEncoder(w).Encode(response) +} + +type ListArtifactLabels500JSONResponse struct { + InternalServerErrorJSONResponse +} + +func (response ListArtifactLabels500JSONResponse) 
VisitListArtifactLabelsResponse(w http.ResponseWriter) error { + w.Header().Set("Content-Type", "application/json") + w.WriteHeader(500) + + return json.NewEncoder(w).Encode(response) +} + +type GetArtifactStatsForRegistryRequestObject struct { + RegistryRef RegistryRefPathParam `json:"registry_ref"` + Params GetArtifactStatsForRegistryParams +} + +type GetArtifactStatsForRegistryResponseObject interface { + VisitGetArtifactStatsForRegistryResponse(w http.ResponseWriter) error +} + +type GetArtifactStatsForRegistry200JSONResponse struct { + ArtifactStatsResponseJSONResponse +} + +func (response GetArtifactStatsForRegistry200JSONResponse) VisitGetArtifactStatsForRegistryResponse(w http.ResponseWriter) error { + w.Header().Set("Content-Type", "application/json") + w.WriteHeader(200) + + return json.NewEncoder(w).Encode(response) +} + +type GetArtifactStatsForRegistry400JSONResponse struct{ BadRequestJSONResponse } + +func (response GetArtifactStatsForRegistry400JSONResponse) VisitGetArtifactStatsForRegistryResponse(w http.ResponseWriter) error { + w.Header().Set("Content-Type", "application/json") + w.WriteHeader(400) + + return json.NewEncoder(w).Encode(response) +} + +type GetArtifactStatsForRegistry401JSONResponse struct{ UnauthenticatedJSONResponse } + +func (response GetArtifactStatsForRegistry401JSONResponse) VisitGetArtifactStatsForRegistryResponse(w http.ResponseWriter) error { + w.Header().Set("Content-Type", "application/json") + w.WriteHeader(401) + + return json.NewEncoder(w).Encode(response) +} + +type GetArtifactStatsForRegistry403JSONResponse struct{ UnauthorizedJSONResponse } + +func (response GetArtifactStatsForRegistry403JSONResponse) VisitGetArtifactStatsForRegistryResponse(w http.ResponseWriter) error { + w.Header().Set("Content-Type", "application/json") + w.WriteHeader(403) + + return json.NewEncoder(w).Encode(response) +} + +type GetArtifactStatsForRegistry404JSONResponse struct{ NotFoundJSONResponse } + +func (response GetArtifactStatsForRegistry404JSONResponse) VisitGetArtifactStatsForRegistryResponse(w http.ResponseWriter) error { + w.Header().Set("Content-Type", "application/json") + w.WriteHeader(404) + + return json.NewEncoder(w).Encode(response) +} + +type GetArtifactStatsForRegistry500JSONResponse struct { + InternalServerErrorJSONResponse +} + +func (response GetArtifactStatsForRegistry500JSONResponse) VisitGetArtifactStatsForRegistryResponse(w http.ResponseWriter) error { + w.Header().Set("Content-Type", "application/json") + w.WriteHeader(500) + + return json.NewEncoder(w).Encode(response) +} + +type UpdateArtifactLabelsRequestObject struct { + RegistryRef RegistryRefPathParam `json:"registry_ref"` + Artifact ArtifactPathParam `json:"artifact"` + Body *UpdateArtifactLabelsJSONRequestBody +} + +type UpdateArtifactLabelsResponseObject interface { + VisitUpdateArtifactLabelsResponse(w http.ResponseWriter) error +} + +type UpdateArtifactLabels200JSONResponse struct { + ArtifactLabelResponseJSONResponse +} + +func (response UpdateArtifactLabels200JSONResponse) VisitUpdateArtifactLabelsResponse(w http.ResponseWriter) error { + w.Header().Set("Content-Type", "application/json") + w.WriteHeader(200) + + return json.NewEncoder(w).Encode(response) +} + +type UpdateArtifactLabels400JSONResponse struct{ BadRequestJSONResponse } + +func (response UpdateArtifactLabels400JSONResponse) VisitUpdateArtifactLabelsResponse(w http.ResponseWriter) error { + w.Header().Set("Content-Type", "application/json") + w.WriteHeader(400) + + return json.NewEncoder(w).Encode(response) +} + 
+type UpdateArtifactLabels401JSONResponse struct{ UnauthenticatedJSONResponse } + +func (response UpdateArtifactLabels401JSONResponse) VisitUpdateArtifactLabelsResponse(w http.ResponseWriter) error { + w.Header().Set("Content-Type", "application/json") + w.WriteHeader(401) + + return json.NewEncoder(w).Encode(response) +} + +type UpdateArtifactLabels403JSONResponse struct{ UnauthorizedJSONResponse } + +func (response UpdateArtifactLabels403JSONResponse) VisitUpdateArtifactLabelsResponse(w http.ResponseWriter) error { + w.Header().Set("Content-Type", "application/json") + w.WriteHeader(403) + + return json.NewEncoder(w).Encode(response) +} + +type UpdateArtifactLabels404JSONResponse struct{ NotFoundJSONResponse } + +func (response UpdateArtifactLabels404JSONResponse) VisitUpdateArtifactLabelsResponse(w http.ResponseWriter) error { + w.Header().Set("Content-Type", "application/json") + w.WriteHeader(404) + + return json.NewEncoder(w).Encode(response) +} + +type UpdateArtifactLabels500JSONResponse struct { + InternalServerErrorJSONResponse +} + +func (response UpdateArtifactLabels500JSONResponse) VisitUpdateArtifactLabelsResponse(w http.ResponseWriter) error { + w.Header().Set("Content-Type", "application/json") + w.WriteHeader(500) + + return json.NewEncoder(w).Encode(response) +} + +type GetArtifactStatsRequestObject struct { + RegistryRef RegistryRefPathParam `json:"registry_ref"` + Artifact ArtifactPathParam `json:"artifact"` + Params GetArtifactStatsParams +} + +type GetArtifactStatsResponseObject interface { + VisitGetArtifactStatsResponse(w http.ResponseWriter) error +} + +type GetArtifactStats200JSONResponse struct { + ArtifactStatsResponseJSONResponse +} + +func (response GetArtifactStats200JSONResponse) VisitGetArtifactStatsResponse(w http.ResponseWriter) error { + w.Header().Set("Content-Type", "application/json") + w.WriteHeader(200) + + return json.NewEncoder(w).Encode(response) +} + +type GetArtifactStats400JSONResponse struct{ BadRequestJSONResponse } + +func (response GetArtifactStats400JSONResponse) VisitGetArtifactStatsResponse(w http.ResponseWriter) error { + w.Header().Set("Content-Type", "application/json") + w.WriteHeader(400) + + return json.NewEncoder(w).Encode(response) +} + +type GetArtifactStats401JSONResponse struct{ UnauthenticatedJSONResponse } + +func (response GetArtifactStats401JSONResponse) VisitGetArtifactStatsResponse(w http.ResponseWriter) error { + w.Header().Set("Content-Type", "application/json") + w.WriteHeader(401) + + return json.NewEncoder(w).Encode(response) +} + +type GetArtifactStats403JSONResponse struct{ UnauthorizedJSONResponse } + +func (response GetArtifactStats403JSONResponse) VisitGetArtifactStatsResponse(w http.ResponseWriter) error { + w.Header().Set("Content-Type", "application/json") + w.WriteHeader(403) + + return json.NewEncoder(w).Encode(response) +} + +type GetArtifactStats404JSONResponse struct{ NotFoundJSONResponse } + +func (response GetArtifactStats404JSONResponse) VisitGetArtifactStatsResponse(w http.ResponseWriter) error { + w.Header().Set("Content-Type", "application/json") + w.WriteHeader(404) + + return json.NewEncoder(w).Encode(response) +} + +type GetArtifactStats500JSONResponse struct { + InternalServerErrorJSONResponse +} + +func (response GetArtifactStats500JSONResponse) VisitGetArtifactStatsResponse(w http.ResponseWriter) error { + w.Header().Set("Content-Type", "application/json") + w.WriteHeader(500) + + return json.NewEncoder(w).Encode(response) +} + +type GetArtifactSummaryRequestObject struct { + RegistryRef 
RegistryRefPathParam `json:"registry_ref"` + Artifact ArtifactPathParam `json:"artifact"` +} + +type GetArtifactSummaryResponseObject interface { + VisitGetArtifactSummaryResponse(w http.ResponseWriter) error +} + +type GetArtifactSummary200JSONResponse struct { + ArtifactSummaryResponseJSONResponse +} + +func (response GetArtifactSummary200JSONResponse) VisitGetArtifactSummaryResponse(w http.ResponseWriter) error { + w.Header().Set("Content-Type", "application/json") + w.WriteHeader(200) + + return json.NewEncoder(w).Encode(response) +} + +type GetArtifactSummary400JSONResponse struct{ BadRequestJSONResponse } + +func (response GetArtifactSummary400JSONResponse) VisitGetArtifactSummaryResponse(w http.ResponseWriter) error { + w.Header().Set("Content-Type", "application/json") + w.WriteHeader(400) + + return json.NewEncoder(w).Encode(response) +} + +type GetArtifactSummary401JSONResponse struct{ UnauthenticatedJSONResponse } + +func (response GetArtifactSummary401JSONResponse) VisitGetArtifactSummaryResponse(w http.ResponseWriter) error { + w.Header().Set("Content-Type", "application/json") + w.WriteHeader(401) + + return json.NewEncoder(w).Encode(response) +} + +type GetArtifactSummary403JSONResponse struct{ UnauthorizedJSONResponse } + +func (response GetArtifactSummary403JSONResponse) VisitGetArtifactSummaryResponse(w http.ResponseWriter) error { + w.Header().Set("Content-Type", "application/json") + w.WriteHeader(403) + + return json.NewEncoder(w).Encode(response) +} + +type GetArtifactSummary404JSONResponse struct{ NotFoundJSONResponse } + +func (response GetArtifactSummary404JSONResponse) VisitGetArtifactSummaryResponse(w http.ResponseWriter) error { + w.Header().Set("Content-Type", "application/json") + w.WriteHeader(404) + + return json.NewEncoder(w).Encode(response) +} + +type GetArtifactSummary500JSONResponse struct { + InternalServerErrorJSONResponse +} + +func (response GetArtifactSummary500JSONResponse) VisitGetArtifactSummaryResponse(w http.ResponseWriter) error { + w.Header().Set("Content-Type", "application/json") + w.WriteHeader(500) + + return json.NewEncoder(w).Encode(response) +} + +type GetDockerArtifactDetailsRequestObject struct { + RegistryRef RegistryRefPathParam `json:"registry_ref"` + Artifact ArtifactPathParam `json:"artifact"` + Version VersionPathParam `json:"version"` + Params GetDockerArtifactDetailsParams +} + +type GetDockerArtifactDetailsResponseObject interface { + VisitGetDockerArtifactDetailsResponse(w http.ResponseWriter) error +} + +type GetDockerArtifactDetails200JSONResponse struct { + DockerArtifactDetailResponseJSONResponse +} + +func (response GetDockerArtifactDetails200JSONResponse) VisitGetDockerArtifactDetailsResponse(w http.ResponseWriter) error { + w.Header().Set("Content-Type", "application/json") + w.WriteHeader(200) + + return json.NewEncoder(w).Encode(response) +} + +type GetDockerArtifactDetails400JSONResponse struct{ BadRequestJSONResponse } + +func (response GetDockerArtifactDetails400JSONResponse) VisitGetDockerArtifactDetailsResponse(w http.ResponseWriter) error { + w.Header().Set("Content-Type", "application/json") + w.WriteHeader(400) + + return json.NewEncoder(w).Encode(response) +} + +type GetDockerArtifactDetails401JSONResponse struct{ UnauthenticatedJSONResponse } + +func (response GetDockerArtifactDetails401JSONResponse) VisitGetDockerArtifactDetailsResponse(w http.ResponseWriter) error { + w.Header().Set("Content-Type", "application/json") + w.WriteHeader(401) + + return json.NewEncoder(w).Encode(response) +} + +type 
GetDockerArtifactDetails403JSONResponse struct{ UnauthorizedJSONResponse } + +func (response GetDockerArtifactDetails403JSONResponse) VisitGetDockerArtifactDetailsResponse(w http.ResponseWriter) error { + w.Header().Set("Content-Type", "application/json") + w.WriteHeader(403) + + return json.NewEncoder(w).Encode(response) +} + +type GetDockerArtifactDetails404JSONResponse struct{ NotFoundJSONResponse } + +func (response GetDockerArtifactDetails404JSONResponse) VisitGetDockerArtifactDetailsResponse(w http.ResponseWriter) error { + w.Header().Set("Content-Type", "application/json") + w.WriteHeader(404) + + return json.NewEncoder(w).Encode(response) +} + +type GetDockerArtifactDetails500JSONResponse struct { + InternalServerErrorJSONResponse +} + +func (response GetDockerArtifactDetails500JSONResponse) VisitGetDockerArtifactDetailsResponse(w http.ResponseWriter) error { + w.Header().Set("Content-Type", "application/json") + w.WriteHeader(500) + + return json.NewEncoder(w).Encode(response) +} + +type GetDockerArtifactLayersRequestObject struct { + RegistryRef RegistryRefPathParam `json:"registry_ref"` + Artifact ArtifactPathParam `json:"artifact"` + Version VersionPathParam `json:"version"` + Params GetDockerArtifactLayersParams +} + +type GetDockerArtifactLayersResponseObject interface { + VisitGetDockerArtifactLayersResponse(w http.ResponseWriter) error +} + +type GetDockerArtifactLayers200JSONResponse struct { + DockerLayersResponseJSONResponse +} + +func (response GetDockerArtifactLayers200JSONResponse) VisitGetDockerArtifactLayersResponse(w http.ResponseWriter) error { + w.Header().Set("Content-Type", "application/json") + w.WriteHeader(200) + + return json.NewEncoder(w).Encode(response) +} + +type GetDockerArtifactLayers400JSONResponse struct{ BadRequestJSONResponse } + +func (response GetDockerArtifactLayers400JSONResponse) VisitGetDockerArtifactLayersResponse(w http.ResponseWriter) error { + w.Header().Set("Content-Type", "application/json") + w.WriteHeader(400) + + return json.NewEncoder(w).Encode(response) +} + +type GetDockerArtifactLayers401JSONResponse struct{ UnauthenticatedJSONResponse } + +func (response GetDockerArtifactLayers401JSONResponse) VisitGetDockerArtifactLayersResponse(w http.ResponseWriter) error { + w.Header().Set("Content-Type", "application/json") + w.WriteHeader(401) + + return json.NewEncoder(w).Encode(response) +} + +type GetDockerArtifactLayers403JSONResponse struct{ UnauthorizedJSONResponse } + +func (response GetDockerArtifactLayers403JSONResponse) VisitGetDockerArtifactLayersResponse(w http.ResponseWriter) error { + w.Header().Set("Content-Type", "application/json") + w.WriteHeader(403) + + return json.NewEncoder(w).Encode(response) +} + +type GetDockerArtifactLayers404JSONResponse struct{ NotFoundJSONResponse } + +func (response GetDockerArtifactLayers404JSONResponse) VisitGetDockerArtifactLayersResponse(w http.ResponseWriter) error { + w.Header().Set("Content-Type", "application/json") + w.WriteHeader(404) + + return json.NewEncoder(w).Encode(response) +} + +type GetDockerArtifactLayers500JSONResponse struct { + InternalServerErrorJSONResponse +} + +func (response GetDockerArtifactLayers500JSONResponse) VisitGetDockerArtifactLayersResponse(w http.ResponseWriter) error { + w.Header().Set("Content-Type", "application/json") + w.WriteHeader(500) + + return json.NewEncoder(w).Encode(response) +} + +type GetDockerArtifactManifestRequestObject struct { + RegistryRef RegistryRefPathParam `json:"registry_ref"` + Artifact ArtifactPathParam `json:"artifact"` + 
Version VersionPathParam `json:"version"` + Params GetDockerArtifactManifestParams +} + +type GetDockerArtifactManifestResponseObject interface { + VisitGetDockerArtifactManifestResponse(w http.ResponseWriter) error +} + +type GetDockerArtifactManifest200JSONResponse struct { + DockerArtifactManifestResponseJSONResponse +} + +func (response GetDockerArtifactManifest200JSONResponse) VisitGetDockerArtifactManifestResponse(w http.ResponseWriter) error { + w.Header().Set("Content-Type", "application/json") + w.WriteHeader(200) + + return json.NewEncoder(w).Encode(response) +} + +type GetDockerArtifactManifest400JSONResponse struct{ BadRequestJSONResponse } + +func (response GetDockerArtifactManifest400JSONResponse) VisitGetDockerArtifactManifestResponse(w http.ResponseWriter) error { + w.Header().Set("Content-Type", "application/json") + w.WriteHeader(400) + + return json.NewEncoder(w).Encode(response) +} + +type GetDockerArtifactManifest401JSONResponse struct{ UnauthenticatedJSONResponse } + +func (response GetDockerArtifactManifest401JSONResponse) VisitGetDockerArtifactManifestResponse(w http.ResponseWriter) error { + w.Header().Set("Content-Type", "application/json") + w.WriteHeader(401) + + return json.NewEncoder(w).Encode(response) +} + +type GetDockerArtifactManifest403JSONResponse struct{ UnauthorizedJSONResponse } + +func (response GetDockerArtifactManifest403JSONResponse) VisitGetDockerArtifactManifestResponse(w http.ResponseWriter) error { + w.Header().Set("Content-Type", "application/json") + w.WriteHeader(403) + + return json.NewEncoder(w).Encode(response) +} + +type GetDockerArtifactManifest404JSONResponse struct{ NotFoundJSONResponse } + +func (response GetDockerArtifactManifest404JSONResponse) VisitGetDockerArtifactManifestResponse(w http.ResponseWriter) error { + w.Header().Set("Content-Type", "application/json") + w.WriteHeader(404) + + return json.NewEncoder(w).Encode(response) +} + +type GetDockerArtifactManifest500JSONResponse struct { + InternalServerErrorJSONResponse +} + +func (response GetDockerArtifactManifest500JSONResponse) VisitGetDockerArtifactManifestResponse(w http.ResponseWriter) error { + w.Header().Set("Content-Type", "application/json") + w.WriteHeader(500) + + return json.NewEncoder(w).Encode(response) +} + +type GetDockerArtifactManifestsRequestObject struct { + RegistryRef RegistryRefPathParam `json:"registry_ref"` + Artifact ArtifactPathParam `json:"artifact"` + Version VersionPathParam `json:"version"` +} + +type GetDockerArtifactManifestsResponseObject interface { + VisitGetDockerArtifactManifestsResponse(w http.ResponseWriter) error +} + +type GetDockerArtifactManifests200JSONResponse struct { + DockerManifestsResponseJSONResponse +} + +func (response GetDockerArtifactManifests200JSONResponse) VisitGetDockerArtifactManifestsResponse(w http.ResponseWriter) error { + w.Header().Set("Content-Type", "application/json") + w.WriteHeader(200) + + return json.NewEncoder(w).Encode(response) +} + +type GetDockerArtifactManifests400JSONResponse struct{ BadRequestJSONResponse } + +func (response GetDockerArtifactManifests400JSONResponse) VisitGetDockerArtifactManifestsResponse(w http.ResponseWriter) error { + w.Header().Set("Content-Type", "application/json") + w.WriteHeader(400) + + return json.NewEncoder(w).Encode(response) +} + +type GetDockerArtifactManifests401JSONResponse struct{ UnauthenticatedJSONResponse } + +func (response GetDockerArtifactManifests401JSONResponse) VisitGetDockerArtifactManifestsResponse(w http.ResponseWriter) error { + 
w.Header().Set("Content-Type", "application/json") + w.WriteHeader(401) + + return json.NewEncoder(w).Encode(response) +} + +type GetDockerArtifactManifests403JSONResponse struct{ UnauthorizedJSONResponse } + +func (response GetDockerArtifactManifests403JSONResponse) VisitGetDockerArtifactManifestsResponse(w http.ResponseWriter) error { + w.Header().Set("Content-Type", "application/json") + w.WriteHeader(403) + + return json.NewEncoder(w).Encode(response) +} + +type GetDockerArtifactManifests404JSONResponse struct{ NotFoundJSONResponse } + +func (response GetDockerArtifactManifests404JSONResponse) VisitGetDockerArtifactManifestsResponse(w http.ResponseWriter) error { + w.Header().Set("Content-Type", "application/json") + w.WriteHeader(404) + + return json.NewEncoder(w).Encode(response) +} + +type GetDockerArtifactManifests500JSONResponse struct { + InternalServerErrorJSONResponse +} + +func (response GetDockerArtifactManifests500JSONResponse) VisitGetDockerArtifactManifestsResponse(w http.ResponseWriter) error { + w.Header().Set("Content-Type", "application/json") + w.WriteHeader(500) + + return json.NewEncoder(w).Encode(response) +} + +type GetHelmArtifactDetailsRequestObject struct { + RegistryRef RegistryRefPathParam `json:"registry_ref"` + Artifact ArtifactPathParam `json:"artifact"` + Version VersionPathParam `json:"version"` +} + +type GetHelmArtifactDetailsResponseObject interface { + VisitGetHelmArtifactDetailsResponse(w http.ResponseWriter) error +} + +type GetHelmArtifactDetails200JSONResponse struct { + HelmArtifactDetailResponseJSONResponse +} + +func (response GetHelmArtifactDetails200JSONResponse) VisitGetHelmArtifactDetailsResponse(w http.ResponseWriter) error { + w.Header().Set("Content-Type", "application/json") + w.WriteHeader(200) + + return json.NewEncoder(w).Encode(response) +} + +type GetHelmArtifactDetails400JSONResponse struct{ BadRequestJSONResponse } + +func (response GetHelmArtifactDetails400JSONResponse) VisitGetHelmArtifactDetailsResponse(w http.ResponseWriter) error { + w.Header().Set("Content-Type", "application/json") + w.WriteHeader(400) + + return json.NewEncoder(w).Encode(response) +} + +type GetHelmArtifactDetails401JSONResponse struct{ UnauthenticatedJSONResponse } + +func (response GetHelmArtifactDetails401JSONResponse) VisitGetHelmArtifactDetailsResponse(w http.ResponseWriter) error { + w.Header().Set("Content-Type", "application/json") + w.WriteHeader(401) + + return json.NewEncoder(w).Encode(response) +} + +type GetHelmArtifactDetails403JSONResponse struct{ UnauthorizedJSONResponse } + +func (response GetHelmArtifactDetails403JSONResponse) VisitGetHelmArtifactDetailsResponse(w http.ResponseWriter) error { + w.Header().Set("Content-Type", "application/json") + w.WriteHeader(403) + + return json.NewEncoder(w).Encode(response) +} + +type GetHelmArtifactDetails404JSONResponse struct{ NotFoundJSONResponse } + +func (response GetHelmArtifactDetails404JSONResponse) VisitGetHelmArtifactDetailsResponse(w http.ResponseWriter) error { + w.Header().Set("Content-Type", "application/json") + w.WriteHeader(404) + + return json.NewEncoder(w).Encode(response) +} + +type GetHelmArtifactDetails500JSONResponse struct { + InternalServerErrorJSONResponse +} + +func (response GetHelmArtifactDetails500JSONResponse) VisitGetHelmArtifactDetailsResponse(w http.ResponseWriter) error { + w.Header().Set("Content-Type", "application/json") + w.WriteHeader(500) + + return json.NewEncoder(w).Encode(response) +} + +type GetHelmArtifactManifestRequestObject struct { + RegistryRef 
RegistryRefPathParam `json:"registry_ref"` + Artifact ArtifactPathParam `json:"artifact"` + Version VersionPathParam `json:"version"` +} + +type GetHelmArtifactManifestResponseObject interface { + VisitGetHelmArtifactManifestResponse(w http.ResponseWriter) error +} + +type GetHelmArtifactManifest200JSONResponse struct { + HelmArtifactManifestResponseJSONResponse +} + +func (response GetHelmArtifactManifest200JSONResponse) VisitGetHelmArtifactManifestResponse(w http.ResponseWriter) error { + w.Header().Set("Content-Type", "application/json") + w.WriteHeader(200) + + return json.NewEncoder(w).Encode(response) +} + +type GetHelmArtifactManifest400JSONResponse struct{ BadRequestJSONResponse } + +func (response GetHelmArtifactManifest400JSONResponse) VisitGetHelmArtifactManifestResponse(w http.ResponseWriter) error { + w.Header().Set("Content-Type", "application/json") + w.WriteHeader(400) + + return json.NewEncoder(w).Encode(response) +} + +type GetHelmArtifactManifest401JSONResponse struct{ UnauthenticatedJSONResponse } + +func (response GetHelmArtifactManifest401JSONResponse) VisitGetHelmArtifactManifestResponse(w http.ResponseWriter) error { + w.Header().Set("Content-Type", "application/json") + w.WriteHeader(401) + + return json.NewEncoder(w).Encode(response) +} + +type GetHelmArtifactManifest403JSONResponse struct{ UnauthorizedJSONResponse } + +func (response GetHelmArtifactManifest403JSONResponse) VisitGetHelmArtifactManifestResponse(w http.ResponseWriter) error { + w.Header().Set("Content-Type", "application/json") + w.WriteHeader(403) + + return json.NewEncoder(w).Encode(response) +} + +type GetHelmArtifactManifest404JSONResponse struct{ NotFoundJSONResponse } + +func (response GetHelmArtifactManifest404JSONResponse) VisitGetHelmArtifactManifestResponse(w http.ResponseWriter) error { + w.Header().Set("Content-Type", "application/json") + w.WriteHeader(404) + + return json.NewEncoder(w).Encode(response) +} + +type GetHelmArtifactManifest500JSONResponse struct { + InternalServerErrorJSONResponse +} + +func (response GetHelmArtifactManifest500JSONResponse) VisitGetHelmArtifactManifestResponse(w http.ResponseWriter) error { + w.Header().Set("Content-Type", "application/json") + w.WriteHeader(500) + + return json.NewEncoder(w).Encode(response) +} + +type GetArtifactVersionSummaryRequestObject struct { + RegistryRef RegistryRefPathParam `json:"registry_ref"` + Artifact ArtifactPathParam `json:"artifact"` + Version VersionPathParam `json:"version"` +} + +type GetArtifactVersionSummaryResponseObject interface { + VisitGetArtifactVersionSummaryResponse(w http.ResponseWriter) error +} + +type GetArtifactVersionSummary200JSONResponse struct { + ArtifactVersionSummaryResponseJSONResponse +} + +func (response GetArtifactVersionSummary200JSONResponse) VisitGetArtifactVersionSummaryResponse(w http.ResponseWriter) error { + w.Header().Set("Content-Type", "application/json") + w.WriteHeader(200) + + return json.NewEncoder(w).Encode(response) +} + +type GetArtifactVersionSummary400JSONResponse struct{ BadRequestJSONResponse } + +func (response GetArtifactVersionSummary400JSONResponse) VisitGetArtifactVersionSummaryResponse(w http.ResponseWriter) error { + w.Header().Set("Content-Type", "application/json") + w.WriteHeader(400) + + return json.NewEncoder(w).Encode(response) +} + +type GetArtifactVersionSummary401JSONResponse struct{ UnauthenticatedJSONResponse } + +func (response GetArtifactVersionSummary401JSONResponse) VisitGetArtifactVersionSummaryResponse(w http.ResponseWriter) error { + 
w.Header().Set("Content-Type", "application/json") + w.WriteHeader(401) + + return json.NewEncoder(w).Encode(response) +} + +type GetArtifactVersionSummary403JSONResponse struct{ UnauthorizedJSONResponse } + +func (response GetArtifactVersionSummary403JSONResponse) VisitGetArtifactVersionSummaryResponse(w http.ResponseWriter) error { + w.Header().Set("Content-Type", "application/json") + w.WriteHeader(403) + + return json.NewEncoder(w).Encode(response) +} + +type GetArtifactVersionSummary404JSONResponse struct{ NotFoundJSONResponse } + +func (response GetArtifactVersionSummary404JSONResponse) VisitGetArtifactVersionSummaryResponse(w http.ResponseWriter) error { + w.Header().Set("Content-Type", "application/json") + w.WriteHeader(404) + + return json.NewEncoder(w).Encode(response) +} + +type GetArtifactVersionSummary500JSONResponse struct { + InternalServerErrorJSONResponse +} + +func (response GetArtifactVersionSummary500JSONResponse) VisitGetArtifactVersionSummaryResponse(w http.ResponseWriter) error { + w.Header().Set("Content-Type", "application/json") + w.WriteHeader(500) + + return json.NewEncoder(w).Encode(response) +} + +type GetAllArtifactVersionsRequestObject struct { + RegistryRef RegistryRefPathParam `json:"registry_ref"` + Artifact ArtifactPathParam `json:"artifact"` + Params GetAllArtifactVersionsParams +} + +type GetAllArtifactVersionsResponseObject interface { + VisitGetAllArtifactVersionsResponse(w http.ResponseWriter) error +} + +type GetAllArtifactVersions200JSONResponse struct { + ListArtifactVersionResponseJSONResponse +} + +func (response GetAllArtifactVersions200JSONResponse) VisitGetAllArtifactVersionsResponse(w http.ResponseWriter) error { + w.Header().Set("Content-Type", "application/json") + w.WriteHeader(200) + + return json.NewEncoder(w).Encode(response) +} + +type GetAllArtifactVersions400JSONResponse struct{ BadRequestJSONResponse } + +func (response GetAllArtifactVersions400JSONResponse) VisitGetAllArtifactVersionsResponse(w http.ResponseWriter) error { + w.Header().Set("Content-Type", "application/json") + w.WriteHeader(400) + + return json.NewEncoder(w).Encode(response) +} + +type GetAllArtifactVersions401JSONResponse struct{ UnauthenticatedJSONResponse } + +func (response GetAllArtifactVersions401JSONResponse) VisitGetAllArtifactVersionsResponse(w http.ResponseWriter) error { + w.Header().Set("Content-Type", "application/json") + w.WriteHeader(401) + + return json.NewEncoder(w).Encode(response) +} + +type GetAllArtifactVersions403JSONResponse struct{ UnauthorizedJSONResponse } + +func (response GetAllArtifactVersions403JSONResponse) VisitGetAllArtifactVersionsResponse(w http.ResponseWriter) error { + w.Header().Set("Content-Type", "application/json") + w.WriteHeader(403) + + return json.NewEncoder(w).Encode(response) +} + +type GetAllArtifactVersions404JSONResponse struct{ NotFoundJSONResponse } + +func (response GetAllArtifactVersions404JSONResponse) VisitGetAllArtifactVersionsResponse(w http.ResponseWriter) error { + w.Header().Set("Content-Type", "application/json") + w.WriteHeader(404) + + return json.NewEncoder(w).Encode(response) +} + +type GetAllArtifactVersions500JSONResponse struct { + InternalServerErrorJSONResponse +} + +func (response GetAllArtifactVersions500JSONResponse) VisitGetAllArtifactVersionsResponse(w http.ResponseWriter) error { + w.Header().Set("Content-Type", "application/json") + w.WriteHeader(500) + + return json.NewEncoder(w).Encode(response) +} + +type GetClientSetupDetailsRequestObject struct { + RegistryRef 
RegistryRefPathParam `json:"registry_ref"` + Params GetClientSetupDetailsParams +} + +type GetClientSetupDetailsResponseObject interface { + VisitGetClientSetupDetailsResponse(w http.ResponseWriter) error +} + +type GetClientSetupDetails200JSONResponse struct { + ClientSetupDetailsResponseJSONResponse +} + +func (response GetClientSetupDetails200JSONResponse) VisitGetClientSetupDetailsResponse(w http.ResponseWriter) error { + w.Header().Set("Content-Type", "application/json") + w.WriteHeader(200) + + return json.NewEncoder(w).Encode(response) +} + +type GetClientSetupDetails400JSONResponse struct{ BadRequestJSONResponse } + +func (response GetClientSetupDetails400JSONResponse) VisitGetClientSetupDetailsResponse(w http.ResponseWriter) error { + w.Header().Set("Content-Type", "application/json") + w.WriteHeader(400) + + return json.NewEncoder(w).Encode(response) +} + +type GetClientSetupDetails401JSONResponse struct{ UnauthenticatedJSONResponse } + +func (response GetClientSetupDetails401JSONResponse) VisitGetClientSetupDetailsResponse(w http.ResponseWriter) error { + w.Header().Set("Content-Type", "application/json") + w.WriteHeader(401) + + return json.NewEncoder(w).Encode(response) +} + +type GetClientSetupDetails403JSONResponse struct{ UnauthorizedJSONResponse } + +func (response GetClientSetupDetails403JSONResponse) VisitGetClientSetupDetailsResponse(w http.ResponseWriter) error { + w.Header().Set("Content-Type", "application/json") + w.WriteHeader(403) + + return json.NewEncoder(w).Encode(response) +} + +type GetClientSetupDetails404JSONResponse struct{ NotFoundJSONResponse } + +func (response GetClientSetupDetails404JSONResponse) VisitGetClientSetupDetailsResponse(w http.ResponseWriter) error { + w.Header().Set("Content-Type", "application/json") + w.WriteHeader(404) + + return json.NewEncoder(w).Encode(response) +} + +type GetClientSetupDetails500JSONResponse struct { + InternalServerErrorJSONResponse +} + +func (response GetClientSetupDetails500JSONResponse) VisitGetClientSetupDetailsResponse(w http.ResponseWriter) error { + w.Header().Set("Content-Type", "application/json") + w.WriteHeader(500) + + return json.NewEncoder(w).Encode(response) +} + +type GetArtifactStatsForSpaceRequestObject struct { + SpaceRef SpaceRefPathParam `json:"space_ref"` + Params GetArtifactStatsForSpaceParams +} + +type GetArtifactStatsForSpaceResponseObject interface { + VisitGetArtifactStatsForSpaceResponse(w http.ResponseWriter) error +} + +type GetArtifactStatsForSpace200JSONResponse struct { + ArtifactStatsResponseJSONResponse +} + +func (response GetArtifactStatsForSpace200JSONResponse) VisitGetArtifactStatsForSpaceResponse(w http.ResponseWriter) error { + w.Header().Set("Content-Type", "application/json") + w.WriteHeader(200) + + return json.NewEncoder(w).Encode(response) +} + +type GetArtifactStatsForSpace400JSONResponse struct{ BadRequestJSONResponse } + +func (response GetArtifactStatsForSpace400JSONResponse) VisitGetArtifactStatsForSpaceResponse(w http.ResponseWriter) error { + w.Header().Set("Content-Type", "application/json") + w.WriteHeader(400) + + return json.NewEncoder(w).Encode(response) +} + +type GetArtifactStatsForSpace401JSONResponse struct{ UnauthenticatedJSONResponse } + +func (response GetArtifactStatsForSpace401JSONResponse) VisitGetArtifactStatsForSpaceResponse(w http.ResponseWriter) error { + w.Header().Set("Content-Type", "application/json") + w.WriteHeader(401) + + return json.NewEncoder(w).Encode(response) +} + +type GetArtifactStatsForSpace403JSONResponse struct{ 
UnauthorizedJSONResponse } + +func (response GetArtifactStatsForSpace403JSONResponse) VisitGetArtifactStatsForSpaceResponse(w http.ResponseWriter) error { + w.Header().Set("Content-Type", "application/json") + w.WriteHeader(403) + + return json.NewEncoder(w).Encode(response) +} + +type GetArtifactStatsForSpace404JSONResponse struct{ NotFoundJSONResponse } + +func (response GetArtifactStatsForSpace404JSONResponse) VisitGetArtifactStatsForSpaceResponse(w http.ResponseWriter) error { + w.Header().Set("Content-Type", "application/json") + w.WriteHeader(404) + + return json.NewEncoder(w).Encode(response) +} + +type GetArtifactStatsForSpace500JSONResponse struct { + InternalServerErrorJSONResponse +} + +func (response GetArtifactStatsForSpace500JSONResponse) VisitGetArtifactStatsForSpaceResponse(w http.ResponseWriter) error { + w.Header().Set("Content-Type", "application/json") + w.WriteHeader(500) + + return json.NewEncoder(w).Encode(response) +} + +type GetAllArtifactsRequestObject struct { + SpaceRef SpaceRefPathParam `json:"space_ref"` + Params GetAllArtifactsParams +} + +type GetAllArtifactsResponseObject interface { + VisitGetAllArtifactsResponse(w http.ResponseWriter) error +} + +type GetAllArtifacts200JSONResponse struct { + ListArtifactResponseJSONResponse +} + +func (response GetAllArtifacts200JSONResponse) VisitGetAllArtifactsResponse(w http.ResponseWriter) error { + w.Header().Set("Content-Type", "application/json") + w.WriteHeader(200) + + return json.NewEncoder(w).Encode(response) +} + +type GetAllArtifacts400JSONResponse struct{ BadRequestJSONResponse } + +func (response GetAllArtifacts400JSONResponse) VisitGetAllArtifactsResponse(w http.ResponseWriter) error { + w.Header().Set("Content-Type", "application/json") + w.WriteHeader(400) + + return json.NewEncoder(w).Encode(response) +} + +type GetAllArtifacts401JSONResponse struct{ UnauthenticatedJSONResponse } + +func (response GetAllArtifacts401JSONResponse) VisitGetAllArtifactsResponse(w http.ResponseWriter) error { + w.Header().Set("Content-Type", "application/json") + w.WriteHeader(401) + + return json.NewEncoder(w).Encode(response) +} + +type GetAllArtifacts403JSONResponse struct{ UnauthorizedJSONResponse } + +func (response GetAllArtifacts403JSONResponse) VisitGetAllArtifactsResponse(w http.ResponseWriter) error { + w.Header().Set("Content-Type", "application/json") + w.WriteHeader(403) + + return json.NewEncoder(w).Encode(response) +} + +type GetAllArtifacts404JSONResponse struct{ NotFoundJSONResponse } + +func (response GetAllArtifacts404JSONResponse) VisitGetAllArtifactsResponse(w http.ResponseWriter) error { + w.Header().Set("Content-Type", "application/json") + w.WriteHeader(404) + + return json.NewEncoder(w).Encode(response) +} + +type GetAllArtifacts500JSONResponse struct { + InternalServerErrorJSONResponse +} + +func (response GetAllArtifacts500JSONResponse) VisitGetAllArtifactsResponse(w http.ResponseWriter) error { + w.Header().Set("Content-Type", "application/json") + w.WriteHeader(500) + + return json.NewEncoder(w).Encode(response) +} + +type GetAllRegistriesRequestObject struct { + SpaceRef SpaceRefPathParam `json:"space_ref"` + Params GetAllRegistriesParams +} + +type GetAllRegistriesResponseObject interface { + VisitGetAllRegistriesResponse(w http.ResponseWriter) error +} + +type GetAllRegistries200JSONResponse struct { + ListRegistryResponseJSONResponse +} + +func (response GetAllRegistries200JSONResponse) VisitGetAllRegistriesResponse(w http.ResponseWriter) error { + w.Header().Set("Content-Type", 
"application/json") + w.WriteHeader(200) + + return json.NewEncoder(w).Encode(response) +} + +type GetAllRegistries400JSONResponse struct{ BadRequestJSONResponse } + +func (response GetAllRegistries400JSONResponse) VisitGetAllRegistriesResponse(w http.ResponseWriter) error { + w.Header().Set("Content-Type", "application/json") + w.WriteHeader(400) + + return json.NewEncoder(w).Encode(response) +} + +type GetAllRegistries401JSONResponse struct{ UnauthenticatedJSONResponse } + +func (response GetAllRegistries401JSONResponse) VisitGetAllRegistriesResponse(w http.ResponseWriter) error { + w.Header().Set("Content-Type", "application/json") + w.WriteHeader(401) + + return json.NewEncoder(w).Encode(response) +} + +type GetAllRegistries403JSONResponse struct{ UnauthorizedJSONResponse } + +func (response GetAllRegistries403JSONResponse) VisitGetAllRegistriesResponse(w http.ResponseWriter) error { + w.Header().Set("Content-Type", "application/json") + w.WriteHeader(403) + + return json.NewEncoder(w).Encode(response) +} + +type GetAllRegistries404JSONResponse struct{ NotFoundJSONResponse } + +func (response GetAllRegistries404JSONResponse) VisitGetAllRegistriesResponse(w http.ResponseWriter) error { + w.Header().Set("Content-Type", "application/json") + w.WriteHeader(404) + + return json.NewEncoder(w).Encode(response) +} + +type GetAllRegistries500JSONResponse struct { + InternalServerErrorJSONResponse +} + +func (response GetAllRegistries500JSONResponse) VisitGetAllRegistriesResponse(w http.ResponseWriter) error { + w.Header().Set("Content-Type", "application/json") + w.WriteHeader(500) + + return json.NewEncoder(w).Encode(response) +} + +// StrictServerInterface represents all server handlers. +type StrictServerInterface interface { + // Create Registry. + // (POST /registry) + CreateRegistry(ctx context.Context, request CreateRegistryRequestObject) (CreateRegistryResponseObject, error) + // Delete a Registry + // (DELETE /registry/{registry_ref}) + DeleteRegistry(ctx context.Context, request DeleteRegistryRequestObject) (DeleteRegistryResponseObject, error) + // Returns Registry Details + // (GET /registry/{registry_ref}) + GetRegistry(ctx context.Context, request GetRegistryRequestObject) (GetRegistryResponseObject, error) + // Updates a Registry + // (PUT /registry/{registry_ref}) + ModifyRegistry(ctx context.Context, request ModifyRegistryRequestObject) (ModifyRegistryResponseObject, error) + // List Artifact Labels + // (GET /registry/{registry_ref}/artifact/labels) + ListArtifactLabels(ctx context.Context, request ListArtifactLabelsRequestObject) (ListArtifactLabelsResponseObject, error) + // Get Artifact Stats + // (GET /registry/{registry_ref}/artifact/stats) + GetArtifactStatsForRegistry(ctx context.Context, request GetArtifactStatsForRegistryRequestObject) (GetArtifactStatsForRegistryResponseObject, error) + // Update Artifact Labels + // (PUT /registry/{registry_ref}/artifact/{artifact}/labels) + UpdateArtifactLabels(ctx context.Context, request UpdateArtifactLabelsRequestObject) (UpdateArtifactLabelsResponseObject, error) + // Get Artifact Stats + // (GET /registry/{registry_ref}/artifact/{artifact}/stats) + GetArtifactStats(ctx context.Context, request GetArtifactStatsRequestObject) (GetArtifactStatsResponseObject, error) + // Get Artifact Summary + // (GET /registry/{registry_ref}/artifact/{artifact}/summary) + GetArtifactSummary(ctx context.Context, request GetArtifactSummaryRequestObject) (GetArtifactSummaryResponseObject, error) + // Describe Docker Artifact Detail + // (GET 
/registry/{registry_ref}/artifact/{artifact}/version/{version}/docker/details) + GetDockerArtifactDetails(ctx context.Context, request GetDockerArtifactDetailsRequestObject) (GetDockerArtifactDetailsResponseObject, error) + // Describe Docker Artifact Layers + // (GET /registry/{registry_ref}/artifact/{artifact}/version/{version}/docker/layers) + GetDockerArtifactLayers(ctx context.Context, request GetDockerArtifactLayersRequestObject) (GetDockerArtifactLayersResponseObject, error) + // Describe Docker Artifact Manifest + // (GET /registry/{registry_ref}/artifact/{artifact}/version/{version}/docker/manifest) + GetDockerArtifactManifest(ctx context.Context, request GetDockerArtifactManifestRequestObject) (GetDockerArtifactManifestResponseObject, error) + // Describe Docker Artifact Manifests + // (GET /registry/{registry_ref}/artifact/{artifact}/version/{version}/docker/manifests) + GetDockerArtifactManifests(ctx context.Context, request GetDockerArtifactManifestsRequestObject) (GetDockerArtifactManifestsResponseObject, error) + // Describe Helm Artifact Detail + // (GET /registry/{registry_ref}/artifact/{artifact}/version/{version}/helm/details) + GetHelmArtifactDetails(ctx context.Context, request GetHelmArtifactDetailsRequestObject) (GetHelmArtifactDetailsResponseObject, error) + // Describe Helm Artifact Manifest + // (GET /registry/{registry_ref}/artifact/{artifact}/version/{version}/helm/manifest) + GetHelmArtifactManifest(ctx context.Context, request GetHelmArtifactManifestRequestObject) (GetHelmArtifactManifestResponseObject, error) + // Get Artifact Version Summary + // (GET /registry/{registry_ref}/artifact/{artifact}/version/{version}/summary) + GetArtifactVersionSummary(ctx context.Context, request GetArtifactVersionSummaryRequestObject) (GetArtifactVersionSummaryResponseObject, error) + // List Artifact Versions + // (GET /registry/{registry_ref}/artifact/{artifact}/versions) + GetAllArtifactVersions(ctx context.Context, request GetAllArtifactVersionsRequestObject) (GetAllArtifactVersionsResponseObject, error) + // Returns CLI Client Setup Details + // (GET /registry/{registry_ref}/client-setup-details) + GetClientSetupDetails(ctx context.Context, request GetClientSetupDetailsRequestObject) (GetClientSetupDetailsResponseObject, error) + // Get Artifact Stats + // (GET /spaces/{space_ref}/artifact/stats) + GetArtifactStatsForSpace(ctx context.Context, request GetArtifactStatsForSpaceRequestObject) (GetArtifactStatsForSpaceResponseObject, error) + // List Artifacts + // (GET /spaces/{space_ref}/artifacts) + GetAllArtifacts(ctx context.Context, request GetAllArtifactsRequestObject) (GetAllArtifactsResponseObject, error) + // List Registries + // (GET /spaces/{space_ref}/registries) + GetAllRegistries(ctx context.Context, request GetAllRegistriesRequestObject) (GetAllRegistriesResponseObject, error) +} + +type StrictHandlerFunc = strictnethttp.StrictHTTPHandlerFunc +type StrictMiddlewareFunc = strictnethttp.StrictHTTPMiddlewareFunc + +type StrictHTTPServerOptions struct { + RequestErrorHandlerFunc func(w http.ResponseWriter, r *http.Request, err error) + ResponseErrorHandlerFunc func(w http.ResponseWriter, r *http.Request, err error) +} + +func NewStrictHandler(ssi StrictServerInterface, middlewares []StrictMiddlewareFunc) ServerInterface { + return &strictHandler{ssi: ssi, middlewares: middlewares, options: StrictHTTPServerOptions{ + RequestErrorHandlerFunc: func(w http.ResponseWriter, r *http.Request, err error) { + http.Error(w, err.Error(), http.StatusBadRequest) + }, + 
ResponseErrorHandlerFunc: func(w http.ResponseWriter, r *http.Request, err error) { + http.Error(w, err.Error(), http.StatusInternalServerError) + }, + }} +} + +func NewStrictHandlerWithOptions(ssi StrictServerInterface, middlewares []StrictMiddlewareFunc, options StrictHTTPServerOptions) ServerInterface { + return &strictHandler{ssi: ssi, middlewares: middlewares, options: options} +} + +type strictHandler struct { + ssi StrictServerInterface + middlewares []StrictMiddlewareFunc + options StrictHTTPServerOptions +} + +// CreateRegistry operation middleware +func (sh *strictHandler) CreateRegistry(w http.ResponseWriter, r *http.Request) { + var request CreateRegistryRequestObject + + var body CreateRegistryJSONRequestBody + if err := json.NewDecoder(r.Body).Decode(&body); err != nil { + sh.options.RequestErrorHandlerFunc(w, r, fmt.Errorf("can't decode JSON body: %w", err)) + return + } + request.Body = &body + + handler := func(ctx context.Context, w http.ResponseWriter, r *http.Request, request interface{}) (interface{}, error) { + return sh.ssi.CreateRegistry(ctx, request.(CreateRegistryRequestObject)) + } + for _, middleware := range sh.middlewares { + handler = middleware(handler, "CreateRegistry") + } + + response, err := handler(r.Context(), w, r, request) + + if err != nil { + sh.options.ResponseErrorHandlerFunc(w, r, err) + } else if validResponse, ok := response.(CreateRegistryResponseObject); ok { + if err := validResponse.VisitCreateRegistryResponse(w); err != nil { + sh.options.ResponseErrorHandlerFunc(w, r, err) + } + } else if response != nil { + sh.options.ResponseErrorHandlerFunc(w, r, fmt.Errorf("unexpected response type: %T", response)) + } +} + +// DeleteRegistry operation middleware +func (sh *strictHandler) DeleteRegistry(w http.ResponseWriter, r *http.Request, registryRef RegistryRefPathParam) { + var request DeleteRegistryRequestObject + + request.RegistryRef = registryRef + + handler := func(ctx context.Context, w http.ResponseWriter, r *http.Request, request interface{}) (interface{}, error) { + return sh.ssi.DeleteRegistry(ctx, request.(DeleteRegistryRequestObject)) + } + for _, middleware := range sh.middlewares { + handler = middleware(handler, "DeleteRegistry") + } + + response, err := handler(r.Context(), w, r, request) + + if err != nil { + sh.options.ResponseErrorHandlerFunc(w, r, err) + } else if validResponse, ok := response.(DeleteRegistryResponseObject); ok { + if err := validResponse.VisitDeleteRegistryResponse(w); err != nil { + sh.options.ResponseErrorHandlerFunc(w, r, err) + } + } else if response != nil { + sh.options.ResponseErrorHandlerFunc(w, r, fmt.Errorf("unexpected response type: %T", response)) + } +} + +// GetRegistry operation middleware +func (sh *strictHandler) GetRegistry(w http.ResponseWriter, r *http.Request, registryRef RegistryRefPathParam) { + var request GetRegistryRequestObject + + request.RegistryRef = registryRef + + handler := func(ctx context.Context, w http.ResponseWriter, r *http.Request, request interface{}) (interface{}, error) { + return sh.ssi.GetRegistry(ctx, request.(GetRegistryRequestObject)) + } + for _, middleware := range sh.middlewares { + handler = middleware(handler, "GetRegistry") + } + + response, err := handler(r.Context(), w, r, request) + + if err != nil { + sh.options.ResponseErrorHandlerFunc(w, r, err) + } else if validResponse, ok := response.(GetRegistryResponseObject); ok { + if err := validResponse.VisitGetRegistryResponse(w); err != nil { + sh.options.ResponseErrorHandlerFunc(w, r, err) + } + } else 
if response != nil { + sh.options.ResponseErrorHandlerFunc(w, r, fmt.Errorf("unexpected response type: %T", response)) + } +} + +// ModifyRegistry operation middleware +func (sh *strictHandler) ModifyRegistry(w http.ResponseWriter, r *http.Request, registryRef RegistryRefPathParam) { + var request ModifyRegistryRequestObject + + request.RegistryRef = registryRef + + var body ModifyRegistryJSONRequestBody + if err := json.NewDecoder(r.Body).Decode(&body); err != nil { + sh.options.RequestErrorHandlerFunc(w, r, fmt.Errorf("can't decode JSON body: %w", err)) + return + } + request.Body = &body + + handler := func(ctx context.Context, w http.ResponseWriter, r *http.Request, request interface{}) (interface{}, error) { + return sh.ssi.ModifyRegistry(ctx, request.(ModifyRegistryRequestObject)) + } + for _, middleware := range sh.middlewares { + handler = middleware(handler, "ModifyRegistry") + } + + response, err := handler(r.Context(), w, r, request) + + if err != nil { + sh.options.ResponseErrorHandlerFunc(w, r, err) + } else if validResponse, ok := response.(ModifyRegistryResponseObject); ok { + if err := validResponse.VisitModifyRegistryResponse(w); err != nil { + sh.options.ResponseErrorHandlerFunc(w, r, err) + } + } else if response != nil { + sh.options.ResponseErrorHandlerFunc(w, r, fmt.Errorf("unexpected response type: %T", response)) + } +} + +// ListArtifactLabels operation middleware +func (sh *strictHandler) ListArtifactLabels(w http.ResponseWriter, r *http.Request, registryRef RegistryRefPathParam, params ListArtifactLabelsParams) { + var request ListArtifactLabelsRequestObject + + request.RegistryRef = registryRef + request.Params = params + + handler := func(ctx context.Context, w http.ResponseWriter, r *http.Request, request interface{}) (interface{}, error) { + return sh.ssi.ListArtifactLabels(ctx, request.(ListArtifactLabelsRequestObject)) + } + for _, middleware := range sh.middlewares { + handler = middleware(handler, "ListArtifactLabels") + } + + response, err := handler(r.Context(), w, r, request) + + if err != nil { + sh.options.ResponseErrorHandlerFunc(w, r, err) + } else if validResponse, ok := response.(ListArtifactLabelsResponseObject); ok { + if err := validResponse.VisitListArtifactLabelsResponse(w); err != nil { + sh.options.ResponseErrorHandlerFunc(w, r, err) + } + } else if response != nil { + sh.options.ResponseErrorHandlerFunc(w, r, fmt.Errorf("unexpected response type: %T", response)) + } +} + +// GetArtifactStatsForRegistry operation middleware +func (sh *strictHandler) GetArtifactStatsForRegistry(w http.ResponseWriter, r *http.Request, registryRef RegistryRefPathParam, params GetArtifactStatsForRegistryParams) { + var request GetArtifactStatsForRegistryRequestObject + + request.RegistryRef = registryRef + request.Params = params + + handler := func(ctx context.Context, w http.ResponseWriter, r *http.Request, request interface{}) (interface{}, error) { + return sh.ssi.GetArtifactStatsForRegistry(ctx, request.(GetArtifactStatsForRegistryRequestObject)) + } + for _, middleware := range sh.middlewares { + handler = middleware(handler, "GetArtifactStatsForRegistry") + } + + response, err := handler(r.Context(), w, r, request) + + if err != nil { + sh.options.ResponseErrorHandlerFunc(w, r, err) + } else if validResponse, ok := response.(GetArtifactStatsForRegistryResponseObject); ok { + if err := validResponse.VisitGetArtifactStatsForRegistryResponse(w); err != nil { + sh.options.ResponseErrorHandlerFunc(w, r, err) + } + } else if response != nil { + 
sh.options.ResponseErrorHandlerFunc(w, r, fmt.Errorf("unexpected response type: %T", response)) + } +} + +// UpdateArtifactLabels operation middleware +func (sh *strictHandler) UpdateArtifactLabels(w http.ResponseWriter, r *http.Request, registryRef RegistryRefPathParam, artifact ArtifactPathParam) { + var request UpdateArtifactLabelsRequestObject + + request.RegistryRef = registryRef + request.Artifact = artifact + + var body UpdateArtifactLabelsJSONRequestBody + if err := json.NewDecoder(r.Body).Decode(&body); err != nil { + sh.options.RequestErrorHandlerFunc(w, r, fmt.Errorf("can't decode JSON body: %w", err)) + return + } + request.Body = &body + + handler := func(ctx context.Context, w http.ResponseWriter, r *http.Request, request interface{}) (interface{}, error) { + return sh.ssi.UpdateArtifactLabels(ctx, request.(UpdateArtifactLabelsRequestObject)) + } + for _, middleware := range sh.middlewares { + handler = middleware(handler, "UpdateArtifactLabels") + } + + response, err := handler(r.Context(), w, r, request) + + if err != nil { + sh.options.ResponseErrorHandlerFunc(w, r, err) + } else if validResponse, ok := response.(UpdateArtifactLabelsResponseObject); ok { + if err := validResponse.VisitUpdateArtifactLabelsResponse(w); err != nil { + sh.options.ResponseErrorHandlerFunc(w, r, err) + } + } else if response != nil { + sh.options.ResponseErrorHandlerFunc(w, r, fmt.Errorf("unexpected response type: %T", response)) + } +} + +// GetArtifactStats operation middleware +func (sh *strictHandler) GetArtifactStats(w http.ResponseWriter, r *http.Request, registryRef RegistryRefPathParam, artifact ArtifactPathParam, params GetArtifactStatsParams) { + var request GetArtifactStatsRequestObject + + request.RegistryRef = registryRef + request.Artifact = artifact + request.Params = params + + handler := func(ctx context.Context, w http.ResponseWriter, r *http.Request, request interface{}) (interface{}, error) { + return sh.ssi.GetArtifactStats(ctx, request.(GetArtifactStatsRequestObject)) + } + for _, middleware := range sh.middlewares { + handler = middleware(handler, "GetArtifactStats") + } + + response, err := handler(r.Context(), w, r, request) + + if err != nil { + sh.options.ResponseErrorHandlerFunc(w, r, err) + } else if validResponse, ok := response.(GetArtifactStatsResponseObject); ok { + if err := validResponse.VisitGetArtifactStatsResponse(w); err != nil { + sh.options.ResponseErrorHandlerFunc(w, r, err) + } + } else if response != nil { + sh.options.ResponseErrorHandlerFunc(w, r, fmt.Errorf("unexpected response type: %T", response)) + } +} + +// GetArtifactSummary operation middleware +func (sh *strictHandler) GetArtifactSummary(w http.ResponseWriter, r *http.Request, registryRef RegistryRefPathParam, artifact ArtifactPathParam) { + var request GetArtifactSummaryRequestObject + + request.RegistryRef = registryRef + request.Artifact = artifact + + handler := func(ctx context.Context, w http.ResponseWriter, r *http.Request, request interface{}) (interface{}, error) { + return sh.ssi.GetArtifactSummary(ctx, request.(GetArtifactSummaryRequestObject)) + } + for _, middleware := range sh.middlewares { + handler = middleware(handler, "GetArtifactSummary") + } + + response, err := handler(r.Context(), w, r, request) + + if err != nil { + sh.options.ResponseErrorHandlerFunc(w, r, err) + } else if validResponse, ok := response.(GetArtifactSummaryResponseObject); ok { + if err := validResponse.VisitGetArtifactSummaryResponse(w); err != nil { + sh.options.ResponseErrorHandlerFunc(w, r, err) + } 
+ } else if response != nil { + sh.options.ResponseErrorHandlerFunc(w, r, fmt.Errorf("unexpected response type: %T", response)) + } +} + +// GetDockerArtifactDetails operation middleware +func (sh *strictHandler) GetDockerArtifactDetails(w http.ResponseWriter, r *http.Request, registryRef RegistryRefPathParam, artifact ArtifactPathParam, version VersionPathParam, params GetDockerArtifactDetailsParams) { + var request GetDockerArtifactDetailsRequestObject + + request.RegistryRef = registryRef + request.Artifact = artifact + request.Version = version + request.Params = params + + handler := func(ctx context.Context, w http.ResponseWriter, r *http.Request, request interface{}) (interface{}, error) { + return sh.ssi.GetDockerArtifactDetails(ctx, request.(GetDockerArtifactDetailsRequestObject)) + } + for _, middleware := range sh.middlewares { + handler = middleware(handler, "GetDockerArtifactDetails") + } + + response, err := handler(r.Context(), w, r, request) + + if err != nil { + sh.options.ResponseErrorHandlerFunc(w, r, err) + } else if validResponse, ok := response.(GetDockerArtifactDetailsResponseObject); ok { + if err := validResponse.VisitGetDockerArtifactDetailsResponse(w); err != nil { + sh.options.ResponseErrorHandlerFunc(w, r, err) + } + } else if response != nil { + sh.options.ResponseErrorHandlerFunc(w, r, fmt.Errorf("unexpected response type: %T", response)) + } +} + +// GetDockerArtifactLayers operation middleware +func (sh *strictHandler) GetDockerArtifactLayers(w http.ResponseWriter, r *http.Request, registryRef RegistryRefPathParam, artifact ArtifactPathParam, version VersionPathParam, params GetDockerArtifactLayersParams) { + var request GetDockerArtifactLayersRequestObject + + request.RegistryRef = registryRef + request.Artifact = artifact + request.Version = version + request.Params = params + + handler := func(ctx context.Context, w http.ResponseWriter, r *http.Request, request interface{}) (interface{}, error) { + return sh.ssi.GetDockerArtifactLayers(ctx, request.(GetDockerArtifactLayersRequestObject)) + } + for _, middleware := range sh.middlewares { + handler = middleware(handler, "GetDockerArtifactLayers") + } + + response, err := handler(r.Context(), w, r, request) + + if err != nil { + sh.options.ResponseErrorHandlerFunc(w, r, err) + } else if validResponse, ok := response.(GetDockerArtifactLayersResponseObject); ok { + if err := validResponse.VisitGetDockerArtifactLayersResponse(w); err != nil { + sh.options.ResponseErrorHandlerFunc(w, r, err) + } + } else if response != nil { + sh.options.ResponseErrorHandlerFunc(w, r, fmt.Errorf("unexpected response type: %T", response)) + } +} + +// GetDockerArtifactManifest operation middleware +func (sh *strictHandler) GetDockerArtifactManifest(w http.ResponseWriter, r *http.Request, registryRef RegistryRefPathParam, artifact ArtifactPathParam, version VersionPathParam, params GetDockerArtifactManifestParams) { + var request GetDockerArtifactManifestRequestObject + + request.RegistryRef = registryRef + request.Artifact = artifact + request.Version = version + request.Params = params + + handler := func(ctx context.Context, w http.ResponseWriter, r *http.Request, request interface{}) (interface{}, error) { + return sh.ssi.GetDockerArtifactManifest(ctx, request.(GetDockerArtifactManifestRequestObject)) + } + for _, middleware := range sh.middlewares { + handler = middleware(handler, "GetDockerArtifactManifest") + } + + response, err := handler(r.Context(), w, r, request) + + if err != nil { + 
sh.options.ResponseErrorHandlerFunc(w, r, err) + } else if validResponse, ok := response.(GetDockerArtifactManifestResponseObject); ok { + if err := validResponse.VisitGetDockerArtifactManifestResponse(w); err != nil { + sh.options.ResponseErrorHandlerFunc(w, r, err) + } + } else if response != nil { + sh.options.ResponseErrorHandlerFunc(w, r, fmt.Errorf("unexpected response type: %T", response)) + } +} + +// GetDockerArtifactManifests operation middleware +func (sh *strictHandler) GetDockerArtifactManifests(w http.ResponseWriter, r *http.Request, registryRef RegistryRefPathParam, artifact ArtifactPathParam, version VersionPathParam) { + var request GetDockerArtifactManifestsRequestObject + + request.RegistryRef = registryRef + request.Artifact = artifact + request.Version = version + + handler := func(ctx context.Context, w http.ResponseWriter, r *http.Request, request interface{}) (interface{}, error) { + return sh.ssi.GetDockerArtifactManifests(ctx, request.(GetDockerArtifactManifestsRequestObject)) + } + for _, middleware := range sh.middlewares { + handler = middleware(handler, "GetDockerArtifactManifests") + } + + response, err := handler(r.Context(), w, r, request) + + if err != nil { + sh.options.ResponseErrorHandlerFunc(w, r, err) + } else if validResponse, ok := response.(GetDockerArtifactManifestsResponseObject); ok { + if err := validResponse.VisitGetDockerArtifactManifestsResponse(w); err != nil { + sh.options.ResponseErrorHandlerFunc(w, r, err) + } + } else if response != nil { + sh.options.ResponseErrorHandlerFunc(w, r, fmt.Errorf("unexpected response type: %T", response)) + } +} + +// GetHelmArtifactDetails operation middleware +func (sh *strictHandler) GetHelmArtifactDetails(w http.ResponseWriter, r *http.Request, registryRef RegistryRefPathParam, artifact ArtifactPathParam, version VersionPathParam) { + var request GetHelmArtifactDetailsRequestObject + + request.RegistryRef = registryRef + request.Artifact = artifact + request.Version = version + + handler := func(ctx context.Context, w http.ResponseWriter, r *http.Request, request interface{}) (interface{}, error) { + return sh.ssi.GetHelmArtifactDetails(ctx, request.(GetHelmArtifactDetailsRequestObject)) + } + for _, middleware := range sh.middlewares { + handler = middleware(handler, "GetHelmArtifactDetails") + } + + response, err := handler(r.Context(), w, r, request) + + if err != nil { + sh.options.ResponseErrorHandlerFunc(w, r, err) + } else if validResponse, ok := response.(GetHelmArtifactDetailsResponseObject); ok { + if err := validResponse.VisitGetHelmArtifactDetailsResponse(w); err != nil { + sh.options.ResponseErrorHandlerFunc(w, r, err) + } + } else if response != nil { + sh.options.ResponseErrorHandlerFunc(w, r, fmt.Errorf("unexpected response type: %T", response)) + } +} + +// GetHelmArtifactManifest operation middleware +func (sh *strictHandler) GetHelmArtifactManifest(w http.ResponseWriter, r *http.Request, registryRef RegistryRefPathParam, artifact ArtifactPathParam, version VersionPathParam) { + var request GetHelmArtifactManifestRequestObject + + request.RegistryRef = registryRef + request.Artifact = artifact + request.Version = version + + handler := func(ctx context.Context, w http.ResponseWriter, r *http.Request, request interface{}) (interface{}, error) { + return sh.ssi.GetHelmArtifactManifest(ctx, request.(GetHelmArtifactManifestRequestObject)) + } + for _, middleware := range sh.middlewares { + handler = middleware(handler, "GetHelmArtifactManifest") + } + + response, err := 
handler(r.Context(), w, r, request) + + if err != nil { + sh.options.ResponseErrorHandlerFunc(w, r, err) + } else if validResponse, ok := response.(GetHelmArtifactManifestResponseObject); ok { + if err := validResponse.VisitGetHelmArtifactManifestResponse(w); err != nil { + sh.options.ResponseErrorHandlerFunc(w, r, err) + } + } else if response != nil { + sh.options.ResponseErrorHandlerFunc(w, r, fmt.Errorf("unexpected response type: %T", response)) + } +} + +// GetArtifactVersionSummary operation middleware +func (sh *strictHandler) GetArtifactVersionSummary(w http.ResponseWriter, r *http.Request, registryRef RegistryRefPathParam, artifact ArtifactPathParam, version VersionPathParam) { + var request GetArtifactVersionSummaryRequestObject + + request.RegistryRef = registryRef + request.Artifact = artifact + request.Version = version + + handler := func(ctx context.Context, w http.ResponseWriter, r *http.Request, request interface{}) (interface{}, error) { + return sh.ssi.GetArtifactVersionSummary(ctx, request.(GetArtifactVersionSummaryRequestObject)) + } + for _, middleware := range sh.middlewares { + handler = middleware(handler, "GetArtifactVersionSummary") + } + + response, err := handler(r.Context(), w, r, request) + + if err != nil { + sh.options.ResponseErrorHandlerFunc(w, r, err) + } else if validResponse, ok := response.(GetArtifactVersionSummaryResponseObject); ok { + if err := validResponse.VisitGetArtifactVersionSummaryResponse(w); err != nil { + sh.options.ResponseErrorHandlerFunc(w, r, err) + } + } else if response != nil { + sh.options.ResponseErrorHandlerFunc(w, r, fmt.Errorf("unexpected response type: %T", response)) + } +} + +// GetAllArtifactVersions operation middleware +func (sh *strictHandler) GetAllArtifactVersions(w http.ResponseWriter, r *http.Request, registryRef RegistryRefPathParam, artifact ArtifactPathParam, params GetAllArtifactVersionsParams) { + var request GetAllArtifactVersionsRequestObject + + request.RegistryRef = registryRef + request.Artifact = artifact + request.Params = params + + handler := func(ctx context.Context, w http.ResponseWriter, r *http.Request, request interface{}) (interface{}, error) { + return sh.ssi.GetAllArtifactVersions(ctx, request.(GetAllArtifactVersionsRequestObject)) + } + for _, middleware := range sh.middlewares { + handler = middleware(handler, "GetAllArtifactVersions") + } + + response, err := handler(r.Context(), w, r, request) + + if err != nil { + sh.options.ResponseErrorHandlerFunc(w, r, err) + } else if validResponse, ok := response.(GetAllArtifactVersionsResponseObject); ok { + if err := validResponse.VisitGetAllArtifactVersionsResponse(w); err != nil { + sh.options.ResponseErrorHandlerFunc(w, r, err) + } + } else if response != nil { + sh.options.ResponseErrorHandlerFunc(w, r, fmt.Errorf("unexpected response type: %T", response)) + } +} + +// GetClientSetupDetails operation middleware +func (sh *strictHandler) GetClientSetupDetails(w http.ResponseWriter, r *http.Request, registryRef RegistryRefPathParam, params GetClientSetupDetailsParams) { + var request GetClientSetupDetailsRequestObject + + request.RegistryRef = registryRef + request.Params = params + + handler := func(ctx context.Context, w http.ResponseWriter, r *http.Request, request interface{}) (interface{}, error) { + return sh.ssi.GetClientSetupDetails(ctx, request.(GetClientSetupDetailsRequestObject)) + } + for _, middleware := range sh.middlewares { + handler = middleware(handler, "GetClientSetupDetails") + } + + response, err := handler(r.Context(), w, r, 
request) + + if err != nil { + sh.options.ResponseErrorHandlerFunc(w, r, err) + } else if validResponse, ok := response.(GetClientSetupDetailsResponseObject); ok { + if err := validResponse.VisitGetClientSetupDetailsResponse(w); err != nil { + sh.options.ResponseErrorHandlerFunc(w, r, err) + } + } else if response != nil { + sh.options.ResponseErrorHandlerFunc(w, r, fmt.Errorf("unexpected response type: %T", response)) + } +} + +// GetArtifactStatsForSpace operation middleware +func (sh *strictHandler) GetArtifactStatsForSpace(w http.ResponseWriter, r *http.Request, spaceRef SpaceRefPathParam, params GetArtifactStatsForSpaceParams) { + var request GetArtifactStatsForSpaceRequestObject + + request.SpaceRef = spaceRef + request.Params = params + + handler := func(ctx context.Context, w http.ResponseWriter, r *http.Request, request interface{}) (interface{}, error) { + return sh.ssi.GetArtifactStatsForSpace(ctx, request.(GetArtifactStatsForSpaceRequestObject)) + } + for _, middleware := range sh.middlewares { + handler = middleware(handler, "GetArtifactStatsForSpace") + } + + response, err := handler(r.Context(), w, r, request) + + if err != nil { + sh.options.ResponseErrorHandlerFunc(w, r, err) + } else if validResponse, ok := response.(GetArtifactStatsForSpaceResponseObject); ok { + if err := validResponse.VisitGetArtifactStatsForSpaceResponse(w); err != nil { + sh.options.ResponseErrorHandlerFunc(w, r, err) + } + } else if response != nil { + sh.options.ResponseErrorHandlerFunc(w, r, fmt.Errorf("unexpected response type: %T", response)) + } +} + +// GetAllArtifacts operation middleware +func (sh *strictHandler) GetAllArtifacts(w http.ResponseWriter, r *http.Request, spaceRef SpaceRefPathParam, params GetAllArtifactsParams) { + var request GetAllArtifactsRequestObject + + request.SpaceRef = spaceRef + request.Params = params + + handler := func(ctx context.Context, w http.ResponseWriter, r *http.Request, request interface{}) (interface{}, error) { + return sh.ssi.GetAllArtifacts(ctx, request.(GetAllArtifactsRequestObject)) + } + for _, middleware := range sh.middlewares { + handler = middleware(handler, "GetAllArtifacts") + } + + response, err := handler(r.Context(), w, r, request) + + if err != nil { + sh.options.ResponseErrorHandlerFunc(w, r, err) + } else if validResponse, ok := response.(GetAllArtifactsResponseObject); ok { + if err := validResponse.VisitGetAllArtifactsResponse(w); err != nil { + sh.options.ResponseErrorHandlerFunc(w, r, err) + } + } else if response != nil { + sh.options.ResponseErrorHandlerFunc(w, r, fmt.Errorf("unexpected response type: %T", response)) + } +} + +// GetAllRegistries operation middleware +func (sh *strictHandler) GetAllRegistries(w http.ResponseWriter, r *http.Request, spaceRef SpaceRefPathParam, params GetAllRegistriesParams) { + var request GetAllRegistriesRequestObject + + request.SpaceRef = spaceRef + request.Params = params + + handler := func(ctx context.Context, w http.ResponseWriter, r *http.Request, request interface{}) (interface{}, error) { + return sh.ssi.GetAllRegistries(ctx, request.(GetAllRegistriesRequestObject)) + } + for _, middleware := range sh.middlewares { + handler = middleware(handler, "GetAllRegistries") + } + + response, err := handler(r.Context(), w, r, request) + + if err != nil { + sh.options.ResponseErrorHandlerFunc(w, r, err) + } else if validResponse, ok := response.(GetAllRegistriesResponseObject); ok { + if err := validResponse.VisitGetAllRegistriesResponse(w); err != nil { + sh.options.ResponseErrorHandlerFunc(w, r, 
err) + } + } else if response != nil { + sh.options.ResponseErrorHandlerFunc(w, r, fmt.Errorf("unexpected response type: %T", response)) + } +} + +// Base64 encoded, gzipped, json marshaled Swagger object +var swaggerSpec = []string{ + + "H4sIAAAAAAAC/+xdX2/bOBL/KgLvHtU43e3dg9+ySdoGl7SB3RQoFkXBSGNbW/1bkkqaDfzdDyRFiZZI", + "iXL8r6jfEmvIGc78Zjgih9QzCrIkz1JIGUXjZ5RjghNgQMR/1/geYnrLf+P/hkADEuUsylI0lg9PkI8i", + "/t/fBZAn5KMUJ4DGKOYPkY9osIAE88YRg0R0yp5yTkEZidI5WvrqB0wIfkLLpY8mMI8oI09XIaQsmkVA", + "LCIoQq+mtMhDYP4t0olqwRryaPw/PeXQx5nTWHgy+ajmBGmRoPGf6PPV5NPd2TXy0d3t9NPk8uwGffUN", + "cmDCohkOmEWGM/GYWbirxj1jrXmwhYXPB5yAl808RVrZPMdsYWRI4O8iIhCiMSMFdAsQRnOgtiFeiIc2", + "kMmmA/nNSJZcYGYzLH904r3NSIKZ98q7uRldXIy+fPnyxSID765HxTkOvuM5uKDpVpJ2oars7VsLXQMc", + "LMdz+FAk90DaspwXhEDKPE7jpZLIJsl8VYIQZriIGRq/9tFMaBCNUZSy/75BlRBRymAOpBJjGv0DBsgJ", + "vhx0YlReDsQr2ZkkobwToyS/nbqJQkoLTGDW4Ql3afR3AZ4i9rgDWLxB0XwjMBuIUAqYBItPQAwSyGce", + "f2jzCknyjfH2PYwywt5GEIcGPtUjC5OMsG+zkqCPx0cSmpBWP+rgkZUEnTxyHICT5QRll9kEwRo2Y9kG", + "YwrLegb8AIRGWWph91k+tXT+UD114tA7K5yVYd8r2VoUW7N1V+tSEgNlf2RhBCK4KXYi+5jIp/z3IEsZ", + "pOJPnOdxFGAu5+gvyoV91pj8mxt3jP41qhOfkXxKR8bOhRyrYy+l8ljmFXmIGVRzoycSH4q0LGLTQjb7", + "7ZBvlhEvICAETEMlqwpNZeCjeZZSo3Llk0GC5yTLgbDSWCFmzjqfFkmCuVA+ogyzgvY1nEoqhRIJqT9V", + "Y18yr/Oq7P4vCCzakgPl5pwDa9jSU4+5ZJWwDDO6awVxnoekHt4VNatH2vKIIFqLpKQsw+R+VLTK/AA0", + "FWbBdyC1wsppQlfcHzjcdAi9JCQjJvH+wKFHVFz10XkcQcqmwIr8AhiO4l35fJvxPm0lphEhkUe5SF5Y", + "y3QhDKjwJYXdkZJMrA8Q0mEl2KrANziNZkDZXrSlmB+gvhJNNCn0NX4CQneqJ8nyIHMSLlitG2XI3aqn", + "4nqYqnkPcbKXkNRmfAAKWkCcmMKRLuyOg5GJ9cFpSg9EVykDkuJ4CuQBiMwftp6NKKYeFVw9kIQ+uo4o", + "28e7WovvvrOSOKLM9O6tC7oH3RyUWpr6KN8B9qAWtTB1CNopXzSovrGhNFWvsOxMRZNqTeYAdEM0YT5k", + "7G1WpOH2o92nBXg0hyCaRcDfwmhWkAC8R0y9NGPeTEixsq62E+scimXkOp4vUx7TYp6PpkUQAKUvUMgm", + "BugyslJSb6ItHt2luGALSBkXFnYAuCbDSoaMRP/sToCSm9jIkC3EUmyapU9JJoyhrR41l71XzVdOgMM2", + "A3UTlh20TVhLcAMMK78x7UcHzKtI/Ka/ZY9pnOGQnmeF1Grv3py/xqB4G8puslCEEmODGDOg1YRkopD7", + "FoYH2nZun+VvNVJtk1GrVjAxUGS3mC3MuzO6yYScxr6bo2z03GVkudbcsvB7TFLutpWlJZ3NzEOsrNqo", + "/WCHJixjOJ6yjGjbyA7NinwQn2WXmsr1AQdFlZRNVcmoHp4xIxLW8pcowXP4YEPvOt6UlJ5kkXJdh2gA", + "uZZ7tcsunJbQdohJJWVHbBLFJOtAdph1qC303GdZDDh1Cl9bCE55EcfnWZLgNOyMSi8LXr6s1dhPVGvs", + "frT36eVKZAszNuft9rSIXvdben17PVjnrg6/qnfhez2sYAslVcOl6qSJK6esQ1K1bXcUyC2m9DEjIfK1", + "NKZd4eajc66GIr/N4igw2KN87MnnIgtuBdYyPecWaZoHfuQRgQv8pEc5zRX7nOiWwCz6MSxUqrqJwU1N", + "04xhF8igI7EvI4g8RdXURIKj9D3g0OKzFILup5zX6pzhuHk1lW17805NQF0cjfnXbv0oRt36UVRN/Sw6", + "Rs8gX2/oDPLBZhaNesbASVpphAzcA+f1jmEzh4DUkLua1PsGaA4rBqJmbOHZZhQgH72DFAhm8Cn7Dqkx", + "shj3BnsDfkm39yTNaerYUlbmngkMneJ9VJDY+PsL57IVgSSX/gnOsh/ai5GKsh1k6y66R1FR2uUS+4+X", + "KXN6uZCblbag8IIsTPXQIyftfQ+SZNZEqizkNr+vPJWHEZwCcEt7hrCX0TMSOLxZl1LZB6+gYJ2cnS3V", + "HWDs2rEOxdXCVQl9rCQru+wfdcd4a5ItpMyJzn8AKJrWsmdv68Uhk8aqjcKmZ4aGGfDTArwFY7nc5/ME", + "kY/gB07yGND4zekbw0QX2rB3FoYR/xPHqlzHw/dZwTy2gHIn0SBwApTiuUU4ApjyjF/8WVaY4iiGEPm9", + "gUSMRfVuUpVh67yNL4iT3tm62s0xIWwbU/lxst76ZG2sF+iBx7Yn6pUN37b7ya00rUCeWpFKXZoj3y3Y", + "tdbpDXGOd1RBvO3nYmG1PPtTncHRQ5HL+R55vGcAF06+yuX01JnPVRrCDzOfQDvQpHfv3rn5jBLvO7Wf", + "U9KVZTxwVMOsxkEfzmTRRQdaDBmHWO9tzcM7QcA6i81H1DiipmOvzlR04RBiqqIIa6T6rAgG9jYocjUX", + "9Y8B7OcPYFUVxZDY1bG2ewTAfgFQ5XHqDX4tmzqFBQUdezxooFGTzATH29Vcu1lyMwNCPZZ5ZUqqLT9e", + "fDz/3+UE+ejm7PPlB+Sjd5cfLidX58hH7y+vb4zrkHbc2zTTXh3BcZw9QniLGQOSDptN72P+7rte26C5", + "O+O4BK63MnWbpbNo7mr1c0nd/+qmK9e0tNC9bXlA2+K2Ny/zDunKpRblernbO1VDwy2ATot7+UhV5QVi", + "B+5zRFiBYy8j3l1OGQGc6NANI95HEqWYyZWPBOc5l3/8XN92YVGD6q+UyK8uyrDQl6LUECn95umDdgHH", + 
"0kdZCh9naPxntxGavXVTN2Rdfm16rcsOin7RSMvArM9w9qoHa2yxlz9UU/igCogep1tvPWXzntpbT/GC", + "9Rfbuopa55ja1leGA8QxMpRRQB9TI07wbrqQZS0wPE5F+5psXoJQnjxOOH3vip1hMnGdRvryKd6QZ4TV", + "5SnRCXgP9WRSlAHVNIdY4vqg+5R8NK2Km5vHbEJRAUy9aLayuP2IqUdlpfKsEEKmGdP3o+/Ozy+nU+Sj", + "t2dX13eTS+Sjy8nk48TIvjFjtDfAxe8FkWU1xmIX1cUtyX6Y3opwIYOR24S3UqrTN9/VhTzLr0tfcHLB", + "YlVGJK6CKUgA+kVYcmNmUdwjH50XlGWJUXNOUa+SqAVSH/14tQKpVw84LjhBBRduHl0b7cJ4CAiwnto3", + "STTNcQBXobnmqKBALNtfjfFUlNwFS7G45lfTlAEoUomby+t10QDaiwqZ+E9ROstUQX25Xl5eDGPPVV55", + "ITxAzOWi5aQ1RgvGcjoejR4fH08WsulJlAkxIhZ3d3h2e6VtRYzR65PTk1ORIOaQ4jxCY/S7+ElO62K0", + "I6K9weWZadvhvLxhpWJ0gkSX0gYcCSWJlibX99o82Vxo5eqb9q0vjatbfjt9be+opBu1js4sffTm9LS/", + "oXYHg2jiwMtwuuLN6e+u7dShCB/9x0U+0/FMcaBCFSYoI+kmYnhOuadpfvCVN6pMPnrWb/FaSsvHwAzz", + "3IX4XcOAF8mNUhwEPPcVnsj/n0cPkHrf4amFEdmFvgqg3cVoCeU1ych4gxl/LWnAxEGb6hzRT4CON6dv", + "+htVZ9g2B6eWvW148tEcmOnGP1aQlNZwKasShsPmHbBDwMzPGFr2BR6b8e0YygsDhu7ECUD6oqAj3oqf", + "tgGgjc9vRxBuFIRt9KwxJY7UstGofqc1xrvriLLmpnQ7TWptddMNIdLvbaddiepILRZ2HGi16zzXC632", + "2x6O8LbC2wQ4DeB1UY4jvqk6EWmE9ztgjUORJ6aJeuV45duMbDju9mNx9fJjhwb6vabrodd8ZeIRuVbk", + "trH0Etw+q7+WWojuSCf6g7Sk21OYbl9Xvla2Yb7o9SXwPgbmQXnHJkOzBvHNR+l9IvsYz4/xvAvs9akb", + "B7hL4m7A18dz9hrM14Zk407dIygdQVnZfROwLNfXR8/lH8uRvOFzpB1VseLVfCKTmlBrOuJ54OG6dau+", + "Qxv9+yjruUfnFb1HH+lY4+XovAfPekxYuUuDYKNeUx+AdHaa6pRhj8/UpxGPLmNymca9w0dXGe4qFcR2", + "4Sr6wS5nZ9GOifW4i36g7OgwXXNM6zbho+sMdx0Nbrt0HrqW91B39/npJpwXOET7dvajJ6zvCVufRxYQ", + "J06vKaaD6EYXaJ9q/zXg33ED/9ED+j3ActGBAv/K4w1C3ymBsp6y7wT/z5o8vRj9x1zoxfg3ZEJb8IBB", + "y6mNqyE7l1Ub107+Cg7Q87Gzowu4Lcy2LyDd4AJtd7EO9XAci+KxpjSWTbM4PmteEnDQSN9iwU/1XVtH", + "Yvmh3V2XEjW/A3J0SsdiIg3fQ91RfkXvlfiK3qu+Fw1VJnp+feWZrnn17jGF0MtSdVZd3ZfZck7DRbK7", + "982hM9D6s0/HZxuPIO+tSrbBras8VHw2m46eq89nb7tyThyyGgzi9ofCjzUWv16NRQdYh+ZEvbkQ3Q1G", + "ZRHVgNynOtPr3GbSuor/mGq5plpHFx6YY+nuK0J9h++uXkTk4Lz17GXz3tWTsdt335f446BGv6wnHk8G", + "DfTEFRdou6I47c47kE7RfHGpjjXIQ9sjnEejh9fCfmVfrSvDbq/ENRHqM3fy83b+ygcJpTDluXFNQA4i", + "c29zYP7q1zC1Hupg09lBdX1yNmt+q1rrrLUf5dznykdntR4bS5zLr8v/BwAA//9lvkhIrY0AAA==", +} + +// GetSwagger returns the content of the embedded swagger specification file +// or error if failed to decode +func decodeSpec() ([]byte, error) { + zipped, err := base64.StdEncoding.DecodeString(strings.Join(swaggerSpec, "")) + if err != nil { + return nil, fmt.Errorf("error base64 decoding spec: %w", err) + } + zr, err := gzip.NewReader(bytes.NewReader(zipped)) + if err != nil { + return nil, fmt.Errorf("error decompressing spec: %w", err) + } + var buf bytes.Buffer + _, err = buf.ReadFrom(zr) + if err != nil { + return nil, fmt.Errorf("error decompressing spec: %w", err) + } + + return buf.Bytes(), nil +} + +var rawSpec = decodeSpecCached() + +// a naive cached of a decoded swagger spec +func decodeSpecCached() func() ([]byte, error) { + data, err := decodeSpec() + return func() ([]byte, error) { + return data, err + } +} + +// Constructs a synthetic filesystem for resolving external references when loading openapi specifications. +func PathToRawSpec(pathToFile string) map[string]func() ([]byte, error) { + res := make(map[string]func() ([]byte, error)) + if len(pathToFile) > 0 { + res[pathToFile] = rawSpec + } + + return res +} + +// GetSwagger returns the Swagger specification corresponding to the generated code +// in this file. The external references of Swagger specification are resolved. +// The logic of resolving external references is tightly connected to "import-mapping" feature. 
+// Externally referenced files must be embedded in the corresponding golang packages.
+// URLs could be supported, but that was out of scope.
+func GetSwagger() (swagger *openapi3.T, err error) {
+	resolvePath := PathToRawSpec("")
+
+	loader := openapi3.NewLoader()
+	loader.IsExternalRefsAllowed = true
+	loader.ReadFromURIFunc = func(loader *openapi3.Loader, url *url.URL) ([]byte, error) {
+		pathToFile := url.String()
+		pathToFile = path.Clean(pathToFile)
+		getSpec, ok := resolvePath[pathToFile]
+		if !ok {
+			err1 := fmt.Errorf("path not found: %s", pathToFile)
+			return nil, err1
+		}
+		return getSpec()
+	}
+	var specData []byte
+	specData, err = rawSpec()
+	if err != nil {
+		return
+	}
+	swagger, err = loader.LoadFromData(specData)
+	if err != nil {
+		return
+	}
+	return
+}
diff --git a/registry/app/api/openapi/contracts/artifact/types.gen.go b/registry/app/api/openapi/contracts/artifact/types.gen.go
new file mode 100644
index 000000000..6241d5c2f
--- /dev/null
+++ b/registry/app/api/openapi/contracts/artifact/types.gen.go
@@ -0,0 +1,977 @@
+// Package artifact provides primitives to interact with the openapi HTTP API.
+//
+// Code generated by github.com/deepmap/oapi-codegen/v2 version v2.1.0 DO NOT EDIT.
+package artifact
+
+import (
+	"encoding/json"
+	"errors"
+	"fmt"
+
+	"github.com/oapi-codegen/runtime"
+)
+
+// Defines values for AuthType.
+const (
+	AuthTypeAnonymous    AuthType = "Anonymous"
+	AuthTypeUserPassword AuthType = "UserPassword"
+)
+
+// Defines values for ClientSetupStepType.
+const (
+	ClientSetupStepTypeGenerateToken ClientSetupStepType = "GenerateToken"
+	ClientSetupStepTypeStatic        ClientSetupStepType = "Static"
+)
+
+// Defines values for PackageType.
+const (
+	PackageTypeDOCKER  PackageType = "DOCKER"
+	PackageTypeGENERIC PackageType = "GENERIC"
+	PackageTypeHELM    PackageType = "HELM"
+	PackageTypeMAVEN   PackageType = "MAVEN"
+)
+
+// Defines values for RegistryType.
+const (
+	RegistryTypeUPSTREAM RegistryType = "UPSTREAM"
+	RegistryTypeVIRTUAL  RegistryType = "VIRTUAL"
+)
+
+// Defines values for Status.
+const (
+	StatusERROR   Status = "ERROR"
+	StatusFAILURE Status = "FAILURE"
+	StatusSUCCESS Status = "SUCCESS"
+)
+
+// Defines values for UpstreamConfigSource.
+const (
+	UpstreamConfigSourceCustom    UpstreamConfigSource = "Custom"
+	UpstreamConfigSourceDockerhub UpstreamConfigSource = "Dockerhub"
+)
+
+// Defines values for RegistryTypeParam.
+const (
+	UPSTREAM RegistryTypeParam = "UPSTREAM"
+	VIRTUAL  RegistryTypeParam = "VIRTUAL"
+)
+
+// Defines values for GetAllRegistriesParamsType.
+const (
+	GetAllRegistriesParamsTypeUPSTREAM GetAllRegistriesParamsType = "UPSTREAM"
+	GetAllRegistriesParamsTypeVIRTUAL  GetAllRegistriesParamsType = "VIRTUAL"
+)
+
+// Anonymous defines model for Anonymous.
+type Anonymous interface{}
+
+// ArtifactLabelRequest defines model for ArtifactLabelRequest.
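+//
+// A hedged sketch of the JSON body this type models (the label values are
+// hypothetical, not defaults):
+//
+//	{"labels": ["prod", "team-a"]}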
+type ArtifactLabelRequest struct { + Labels []string `json:"labels"` +} + +// ArtifactMetadata Artifact Metadata +type ArtifactMetadata struct { + DownloadsCount *int64 `json:"downloadsCount,omitempty"` + Labels *[]string `json:"labels,omitempty"` + LastModified *string `json:"lastModified,omitempty"` + LatestVersion string `json:"latestVersion"` + Name string `json:"name"` + + // PackageType refers to package + PackageType *PackageType `json:"packageType,omitempty"` + RegistryIdentifier string `json:"registryIdentifier"` + RegistryPath string `json:"registryPath"` +} + +// ArtifactStats Harness Artifact Stats +type ArtifactStats struct { + DownloadCount *int64 `json:"downloadCount,omitempty"` + DownloadSize *int64 `json:"downloadSize,omitempty"` + TotalStorageSize *int64 `json:"totalStorageSize,omitempty"` + UploadSize *int64 `json:"uploadSize,omitempty"` +} + +// ArtifactSummary Harness Artifact Summary +type ArtifactSummary struct { + CreatedAt *string `json:"createdAt,omitempty"` + DownloadsCount *int64 `json:"downloadsCount,omitempty"` + ImageName string `json:"imageName"` + Labels *[]string `json:"labels,omitempty"` + ModifiedAt *string `json:"modifiedAt,omitempty"` + + // PackageType refers to package + PackageType PackageType `json:"packageType"` +} + +// ArtifactVersionMetadata Artifact Version Metadata +type ArtifactVersionMetadata struct { + DigestCount *int64 `json:"digestCount,omitempty"` + DownloadsCount *int64 `json:"downloadsCount,omitempty"` + IslatestVersion *bool `json:"islatestVersion,omitempty"` + LastModified *string `json:"lastModified,omitempty"` + Name string `json:"name"` + + // PackageType refers to package + PackageType *PackageType `json:"packageType,omitempty"` + PullCommand *string `json:"pullCommand,omitempty"` + RegistryIdentifier string `json:"registryIdentifier"` + RegistryPath string `json:"registryPath"` + Size *string `json:"size,omitempty"` +} + +// ArtifactVersionSummary Docker Artifact Version Summary +type ArtifactVersionSummary struct { + ImageName string `json:"imageName"` + IsLatestVersion *bool `json:"isLatestVersion,omitempty"` + + // PackageType refers to package + PackageType PackageType `json:"packageType"` + Version string `json:"version"` +} + +// AuthType Authentication type +type AuthType string + +// CleanupPolicy Cleanup Policy for Harness Artifact Registries +type CleanupPolicy struct { + ExpireDays *int `json:"expireDays,omitempty"` + Name *string `json:"name,omitempty"` + PackagePrefix *[]string `json:"packagePrefix,omitempty"` + VersionPrefix *[]string `json:"versionPrefix,omitempty"` +} + +// ClientSetupDetails Client Setup Details +type ClientSetupDetails struct { + MainHeader string `json:"mainHeader"` + SecHeader string `json:"secHeader"` + Sections []ClientSetupSection `json:"sections"` +} + +// ClientSetupSection Client Setup Section +type ClientSetupSection struct { + Header *string `json:"header,omitempty"` + Steps *[]ClientSetupStep `json:"steps,omitempty"` +} + +// ClientSetupStep Client Setup Step +type ClientSetupStep struct { + Commands *[]string `json:"commands,omitempty"` + Header *string `json:"header,omitempty"` + + // Type ClientSetupStepType type + Type *ClientSetupStepType `json:"type,omitempty"` +} + +// ClientSetupStepType ClientSetupStepType type +type ClientSetupStepType string + +// DockerArtifactDetail Docker Artifact Detail +type DockerArtifactDetail struct { + CreatedAt *string `json:"createdAt,omitempty"` + DownloadsCount *int64 `json:"downloadsCount,omitempty"` + ImageName string `json:"imageName"` + 
IsLatestVersion *bool `json:"isLatestVersion,omitempty"` + ModifiedAt *string `json:"modifiedAt,omitempty"` + + // PackageType refers to package + PackageType PackageType `json:"packageType"` + PullCommand *string `json:"pullCommand,omitempty"` + RegistryPath string `json:"registryPath"` + Size *string `json:"size,omitempty"` + Url string `json:"url"` + Version string `json:"version"` +} + +// DockerArtifactManifest Docker Artifact Manifest +type DockerArtifactManifest struct { + Manifest string `json:"manifest"` +} + +// DockerLayerEntry Harness Artifact Layers +type DockerLayerEntry struct { + Command string `json:"command"` + Size *string `json:"size,omitempty"` +} + +// DockerLayersSummary Harness Layers Summary +type DockerLayersSummary struct { + Digest string `json:"digest"` + Layers *[]DockerLayerEntry `json:"layers,omitempty"` + OsArch *string `json:"osArch,omitempty"` +} + +// DockerManifestDetails Harness Artifact Layers +type DockerManifestDetails struct { + CreatedAt *string `json:"createdAt,omitempty"` + Digest string `json:"digest"` + OsArch string `json:"osArch"` + Size *string `json:"size,omitempty"` +} + +// DockerManifests Harness Manifests +type DockerManifests struct { + ImageName string `json:"imageName"` + IsLatestVersion *bool `json:"isLatestVersion,omitempty"` + Manifests *[]DockerManifestDetails `json:"manifests,omitempty"` + Version string `json:"version"` +} + +// Error defines model for Error. +type Error struct { + // Code The http error code + Code string `json:"code"` + + // Details Additional details about the error + Details *map[string]interface{} `json:"details,omitempty"` + + // Message The reason the request failed + Message string `json:"message"` +} + +// HelmArtifactDetail Helm Artifact Detail +type HelmArtifactDetail struct { + Artifact *string `json:"artifact,omitempty"` + CreatedAt *string `json:"createdAt,omitempty"` + DownloadsCount *int64 `json:"downloadsCount,omitempty"` + IsLatestVersion *bool `json:"isLatestVersion,omitempty"` + ModifiedAt *string `json:"modifiedAt,omitempty"` + + // PackageType refers to package + PackageType PackageType `json:"packageType"` + PullCommand *string `json:"pullCommand,omitempty"` + RegistryPath string `json:"registryPath"` + Size *string `json:"size,omitempty"` + Url string `json:"url"` + Version string `json:"version"` +} + +// HelmArtifactManifest Helm Artifact Manifest +type HelmArtifactManifest struct { + Manifest string `json:"manifest"` +} + +// ListArtifact A list of Artifacts +type ListArtifact struct { + // Artifacts A list of Artifact + Artifacts []ArtifactMetadata `json:"artifacts"` + + // ItemCount The total number of items + ItemCount *int64 `json:"itemCount,omitempty"` + + // PageCount The total number of pages + PageCount *int64 `json:"pageCount,omitempty"` + + // PageIndex The current page + PageIndex *int64 `json:"pageIndex,omitempty"` + + // PageSize The number of items per page + PageSize *int `json:"pageSize,omitempty"` +} + +// ListArtifactLabel A list of Harness Artifact Labels +type ListArtifactLabel struct { + // ItemCount The total number of items + ItemCount *int64 `json:"itemCount,omitempty"` + Labels []string `json:"labels"` + + // PageCount The total number of pages + PageCount *int64 `json:"pageCount,omitempty"` + + // PageIndex The current page + PageIndex *int64 `json:"pageIndex,omitempty"` + + // PageSize The number of items per page + PageSize *int `json:"pageSize,omitempty"` +} + +// ListArtifactVersion A list of Artifact versions +type ListArtifactVersion struct { + // 
ArtifactVersions A list of Artifact versions + ArtifactVersions *[]ArtifactVersionMetadata `json:"artifactVersions,omitempty"` + + // ItemCount The total number of items + ItemCount *int64 `json:"itemCount,omitempty"` + + // PageCount The total number of pages + PageCount *int64 `json:"pageCount,omitempty"` + + // PageIndex The current page + PageIndex *int64 `json:"pageIndex,omitempty"` + + // PageSize The number of items per page + PageSize *int `json:"pageSize,omitempty"` +} + +// ListRegistry A list of Harness Artifact Registries +type ListRegistry struct { + // ItemCount The total number of items + ItemCount *int64 `json:"itemCount,omitempty"` + + // PageCount The total number of pages + PageCount *int64 `json:"pageCount,omitempty"` + + // PageIndex The current page + PageIndex *int64 `json:"pageIndex,omitempty"` + + // PageSize The number of items per page + PageSize *int `json:"pageSize,omitempty"` + + // Registries A list of Harness Artifact Registries + Registries []RegistryMetadata `json:"registries"` +} + +// PackageType refers to package +type PackageType string + +// Registry Harness Artifact Registry +type Registry struct { + AllowedPattern *[]string `json:"allowedPattern,omitempty"` + BlockedPattern *[]string `json:"blockedPattern,omitempty"` + CleanupPolicy *[]CleanupPolicy `json:"cleanupPolicy,omitempty"` + + // Config SubConfig specific for Virtual or Upstream Registry + Config *RegistryConfig `json:"config,omitempty"` + CreatedAt *string `json:"createdAt,omitempty"` + Description *string `json:"description,omitempty"` + Identifier string `json:"identifier"` + Labels *[]string `json:"labels,omitempty"` + ModifiedAt *string `json:"modifiedAt,omitempty"` + + // PackageType refers to package + PackageType PackageType `json:"packageType"` + Url string `json:"url"` +} + +// RegistryConfig SubConfig specific for Virtual or Upstream Registry +type RegistryConfig struct { + // Type refers to type of registry i.e virtual or upstream + Type RegistryType `json:"type"` + union json.RawMessage +} + +// RegistryMetadata Harness Artifact Registry Metadata +type RegistryMetadata struct { + ArtifactsCount *int64 `json:"artifactsCount,omitempty"` + Description *string `json:"description,omitempty"` + DownloadsCount *int64 `json:"downloadsCount,omitempty"` + Identifier string `json:"identifier"` + Labels *[]string `json:"labels,omitempty"` + LastModified *string `json:"lastModified,omitempty"` + + // PackageType refers to package + PackageType PackageType `json:"packageType"` + Path *string `json:"path,omitempty"` + RegistrySize *string `json:"registrySize,omitempty"` + + // Type refers to type of registry i.e virtual or upstream + Type RegistryType `json:"type"` + Url string `json:"url"` +} + +// RegistryRequest defines model for RegistryRequest. 
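+//
+// A hedged sketch of a creation payload (identifier, packageType and config
+// values are illustrative only):
+//
+//	{"identifier": "my-registry", "packageType": "DOCKER", "config": {"type": "VIRTUAL"}}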
+type RegistryRequest struct { + AllowedPattern *[]string `json:"allowedPattern,omitempty"` + BlockedPattern *[]string `json:"blockedPattern,omitempty"` + CleanupPolicy *[]CleanupPolicy `json:"cleanupPolicy,omitempty"` + + // Config SubConfig specific for Virtual or Upstream Registry + Config *RegistryConfig `json:"config,omitempty"` + Description *string `json:"description,omitempty"` + Identifier string `json:"identifier"` + Labels *[]string `json:"labels,omitempty"` + + // PackageType refers to package + PackageType PackageType `json:"packageType"` + ParentRef *string `json:"parentRef,omitempty"` +} + +// RegistryType refers to type of registry i.e virtual or upstream +type RegistryType string + +// Status Indicates if the request was successful or not +type Status string + +// UpstreamConfig Configuration for Harness Artifact UpstreamProxies +type UpstreamConfig struct { + Auth *UpstreamConfig_Auth `json:"auth,omitempty"` + + // AuthType Authentication type + AuthType AuthType `json:"authType"` + Source *UpstreamConfigSource `json:"source,omitempty"` + Url *string `json:"url,omitempty"` +} + +// UpstreamConfig_Auth defines model for UpstreamConfig.Auth. +type UpstreamConfig_Auth struct { + union json.RawMessage +} + +// UpstreamConfigSource defines model for UpstreamConfig.Source. +type UpstreamConfigSource string + +// UserPassword defines model for UserPassword. +type UserPassword struct { + SecretIdentifier *string `json:"secretIdentifier,omitempty"` + SecretSpaceId *int `json:"secretSpaceId,omitempty"` + UserName string `json:"userName"` +} + +// VirtualConfig Configuration for Harness Virtual Artifact Registries +type VirtualConfig struct { + UpstreamProxies *[]string `json:"upstreamProxies,omitempty"` +} + +// LabelsParam defines model for LabelsParam. +type LabelsParam []string + +// RegistryIdentifierParam defines model for RegistryIdentifierParam. +type RegistryIdentifierParam string + +// RegistryTypeParam defines model for RegistryTypeParam. +type RegistryTypeParam string + +// ArtifactParam defines model for artifactParam. +type ArtifactParam string + +// ArtifactPathParam defines model for artifactPathParam. +type ArtifactPathParam string + +// DigestParam defines model for digestParam. +type DigestParam string + +// FromDateParam defines model for fromDateParam. +type FromDateParam string + +// PackageTypeParam defines model for packageTypeParam. +type PackageTypeParam []string + +// PageNumber defines model for pageNumber. +type PageNumber int64 + +// PageSize defines model for pageSize. +type PageSize int64 + +// RegistryRefPathParam defines model for registryRefPathParam. +type RegistryRefPathParam string + +// SearchTerm defines model for searchTerm. +type SearchTerm string + +// SortField defines model for sortField. +type SortField string + +// SortOrder defines model for sortOrder. +type SortOrder string + +// SpaceRefPathParam defines model for spaceRefPathParam. +type SpaceRefPathParam string + +// ToDateParam defines model for toDateParam. +type ToDateParam string + +// VersionParam defines model for versionParam. +type VersionParam string + +// VersionPathParam defines model for versionPathParam. +type VersionPathParam string + +// ArtifactLabelResponse defines model for ArtifactLabelResponse. 
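+//
+// The *Response types in this file all wrap their payload in the same
+// {status, data} envelope; a hypothetical instance for this one:
+//
+//	{"status": "SUCCESS", "data": {"imageName": "alpine", "packageType": "DOCKER"}}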
+type ArtifactLabelResponse struct { + // Data Harness Artifact Summary + Data ArtifactSummary `json:"data"` + + // Status Indicates if the request was successful or not + Status Status `json:"status"` +} + +// ArtifactStatsResponse defines model for ArtifactStatsResponse. +type ArtifactStatsResponse struct { + // Data Harness Artifact Stats + Data ArtifactStats `json:"data"` + + // Status Indicates if the request was successful or not + Status Status `json:"status"` +} + +// ArtifactSummaryResponse defines model for ArtifactSummaryResponse. +type ArtifactSummaryResponse struct { + // Data Harness Artifact Summary + Data ArtifactSummary `json:"data"` + + // Status Indicates if the request was successful or not + Status Status `json:"status"` +} + +// ArtifactVersionSummaryResponse defines model for ArtifactVersionSummaryResponse. +type ArtifactVersionSummaryResponse struct { + // Data Docker Artifact Version Summary + Data ArtifactVersionSummary `json:"data"` + + // Status Indicates if the request was successful or not + Status Status `json:"status"` +} + +// BadRequest defines model for BadRequest. +type BadRequest Error + +// ClientSetupDetailsResponse defines model for ClientSetupDetailsResponse. +type ClientSetupDetailsResponse struct { + // Data Client Setup Details + Data ClientSetupDetails `json:"data"` + + // Status Indicates if the request was successful or not + Status Status `json:"status"` +} + +// DockerArtifactDetailResponse defines model for DockerArtifactDetailResponse. +type DockerArtifactDetailResponse struct { + // Data Docker Artifact Detail + Data DockerArtifactDetail `json:"data"` + + // Status Indicates if the request was successful or not + Status Status `json:"status"` +} + +// DockerArtifactManifestResponse defines model for DockerArtifactManifestResponse. +type DockerArtifactManifestResponse struct { + // Data Docker Artifact Manifest + Data DockerArtifactManifest `json:"data"` + + // Status Indicates if the request was successful or not + Status Status `json:"status"` +} + +// DockerLayersResponse defines model for DockerLayersResponse. +type DockerLayersResponse struct { + // Data Harness Layers Summary + Data DockerLayersSummary `json:"data"` + + // Status Indicates if the request was successful or not + Status Status `json:"status"` +} + +// DockerManifestsResponse defines model for DockerManifestsResponse. +type DockerManifestsResponse struct { + // Data Harness Manifests + Data DockerManifests `json:"data"` + + // Status Indicates if the request was successful or not + Status Status `json:"status"` +} + +// HelmArtifactDetailResponse defines model for HelmArtifactDetailResponse. +type HelmArtifactDetailResponse struct { + // Data Helm Artifact Detail + Data HelmArtifactDetail `json:"data"` + + // Status Indicates if the request was successful or not + Status Status `json:"status"` +} + +// HelmArtifactManifestResponse defines model for HelmArtifactManifestResponse. +type HelmArtifactManifestResponse struct { + // Data Helm Artifact Manifest + Data HelmArtifactManifest `json:"data"` + + // Status Indicates if the request was successful or not + Status Status `json:"status"` +} + +// InternalServerError defines model for InternalServerError. +type InternalServerError Error + +// ListArtifactLabelResponse defines model for ListArtifactLabelResponse. 
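+//
+// The list payloads also carry the shared paging fields; a hypothetical
+// instance:
+//
+//	{"status": "SUCCESS", "data": {"labels": ["prod"], "itemCount": 1, "pageCount": 1, "pageIndex": 0, "pageSize": 10}}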
+type ListArtifactLabelResponse struct { + // Data A list of Harness Artifact Labels + Data ListArtifactLabel `json:"data"` + + // Status Indicates if the request was successful or not + Status Status `json:"status"` +} + +// ListArtifactResponse defines model for ListArtifactResponse. +type ListArtifactResponse struct { + // Data A list of Artifacts + Data ListArtifact `json:"data"` + + // Status Indicates if the request was successful or not + Status Status `json:"status"` +} + +// ListArtifactVersionResponse defines model for ListArtifactVersionResponse. +type ListArtifactVersionResponse struct { + // Data A list of Artifact versions + Data ListArtifactVersion `json:"data"` + + // Status Indicates if the request was successful or not + Status Status `json:"status"` +} + +// ListRegistryResponse defines model for ListRegistryResponse. +type ListRegistryResponse struct { + // Data A list of Harness Artifact Registries + Data ListRegistry `json:"data"` + + // Status Indicates if the request was successful or not + Status Status `json:"status"` +} + +// NotFound defines model for NotFound. +type NotFound Error + +// RegistryResponse defines model for RegistryResponse. +type RegistryResponse struct { + // Data Harness Artifact Registry + Data Registry `json:"data"` + + // Status Indicates if the request was successful or not + Status Status `json:"status"` +} + +// Success defines model for Success. +type Success struct { + // Status Indicates if the request was successful or not + Status Status `json:"status"` +} + +// Unauthenticated defines model for Unauthenticated. +type Unauthenticated Error + +// Unauthorized defines model for Unauthorized. +type Unauthorized Error + +// ListArtifactLabelsParams defines parameters for ListArtifactLabels. +type ListArtifactLabelsParams struct { + // Page Current page number + Page *PageNumber `form:"page,omitempty" json:"page,omitempty"` + + // Size Number of items per page + Size *PageSize `form:"size,omitempty" json:"size,omitempty"` + + // SearchTerm search Term. + SearchTerm *SearchTerm `form:"search_term,omitempty" json:"search_term,omitempty"` +} + +// GetArtifactStatsForRegistryParams defines parameters for GetArtifactStatsForRegistry. +type GetArtifactStatsForRegistryParams struct { + // From Date. Format - MM/DD/YYYY + From *FromDateParam `form:"from,omitempty" json:"from,omitempty"` + + // To Date. Format - MM/DD/YYYY + To *ToDateParam `form:"to,omitempty" json:"to,omitempty"` +} + +// GetArtifactStatsParams defines parameters for GetArtifactStats. +type GetArtifactStatsParams struct { + // From Date. Format - MM/DD/YYYY + From *FromDateParam `form:"from,omitempty" json:"from,omitempty"` + + // To Date. Format - MM/DD/YYYY + To *ToDateParam `form:"to,omitempty" json:"to,omitempty"` +} + +// GetDockerArtifactDetailsParams defines parameters for GetDockerArtifactDetails. +type GetDockerArtifactDetailsParams struct { + // Digest Digest. + Digest DigestParam `form:"digest" json:"digest"` +} + +// GetDockerArtifactLayersParams defines parameters for GetDockerArtifactLayers. +type GetDockerArtifactLayersParams struct { + // Digest Digest. + Digest DigestParam `form:"digest" json:"digest"` +} + +// GetDockerArtifactManifestParams defines parameters for GetDockerArtifactManifest. +type GetDockerArtifactManifestParams struct { + // Digest Digest. + Digest DigestParam `form:"digest" json:"digest"` +} + +// GetAllArtifactVersionsParams defines parameters for GetAllArtifactVersions. 
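+//
+// These fields bind to query parameters via their form tags, so a request
+// might look like the following (the path is elided, not taken from the spec):
+//
+//	GET .../versions?page=1&size=20&sort_order=DESC&search_term=v1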
+type GetAllArtifactVersionsParams struct {
+	// Page Current page number
+	Page *PageNumber `form:"page,omitempty" json:"page,omitempty"`
+
+	// Size Number of items per page
+	Size *PageSize `form:"size,omitempty" json:"size,omitempty"`
+
+	// SortOrder sortOrder
+	SortOrder *SortOrder `form:"sort_order,omitempty" json:"sort_order,omitempty"`
+
+	// SortField sortField
+	SortField *SortField `form:"sort_field,omitempty" json:"sort_field,omitempty"`
+
+	// SearchTerm search Term.
+	SearchTerm *SearchTerm `form:"search_term,omitempty" json:"search_term,omitempty"`
+}
+
+// GetClientSetupDetailsParams defines parameters for GetClientSetupDetails.
+type GetClientSetupDetailsParams struct {
+	// Artifact Artifact
+	Artifact *ArtifactParam `form:"artifact,omitempty" json:"artifact,omitempty"`
+
+	// Version Version
+	Version *VersionParam `form:"version,omitempty" json:"version,omitempty"`
+}
+
+// GetArtifactStatsForSpaceParams defines parameters for GetArtifactStatsForSpace.
+type GetArtifactStatsForSpaceParams struct {
+	// From Date. Format - MM/DD/YYYY
+	From *FromDateParam `form:"from,omitempty" json:"from,omitempty"`
+
+	// To Date. Format - MM/DD/YYYY
+	To *ToDateParam `form:"to,omitempty" json:"to,omitempty"`
+}
+
+// GetAllArtifactsParams defines parameters for GetAllArtifacts.
+type GetAllArtifactsParams struct {
+	// Label Label.
+	Label *LabelsParam `form:"label,omitempty" json:"label,omitempty"`
+
+	// PackageType Registry Package Type
+	PackageType *PackageTypeParam `form:"package_type,omitempty" json:"package_type,omitempty"`
+
+	// RegIdentifier Registry Identifier
+	RegIdentifier *RegistryIdentifierParam `form:"reg_identifier,omitempty" json:"reg_identifier,omitempty"`
+
+	// Page Current page number
+	Page *PageNumber `form:"page,omitempty" json:"page,omitempty"`
+
+	// Size Number of items per page
+	Size *PageSize `form:"size,omitempty" json:"size,omitempty"`
+
+	// SortOrder sortOrder
+	SortOrder *SortOrder `form:"sort_order,omitempty" json:"sort_order,omitempty"`
+
+	// SortField sortField
+	SortField *SortField `form:"sort_field,omitempty" json:"sort_field,omitempty"`
+
+	// SearchTerm search Term.
+	SearchTerm *SearchTerm `form:"search_term,omitempty" json:"search_term,omitempty"`
+}
+
+// GetAllRegistriesParams defines parameters for GetAllRegistries.
+type GetAllRegistriesParams struct {
+	// PackageType Registry Package Type
+	PackageType *PackageTypeParam `form:"package_type,omitempty" json:"package_type,omitempty"`
+
+	// Type Registry Type
+	Type *GetAllRegistriesParamsType `form:"type,omitempty" json:"type,omitempty"`
+
+	// Page Current page number
+	Page *PageNumber `form:"page,omitempty" json:"page,omitempty"`
+
+	// Size Number of items per page
+	Size *PageSize `form:"size,omitempty" json:"size,omitempty"`
+
+	// SortOrder sortOrder
+	SortOrder *SortOrder `form:"sort_order,omitempty" json:"sort_order,omitempty"`
+
+	// SortField sortField
+	SortField *SortField `form:"sort_field,omitempty" json:"sort_field,omitempty"`
+
+	// SearchTerm search Term.
+	SearchTerm *SearchTerm `form:"search_term,omitempty" json:"search_term,omitempty"`
+}
+
+// GetAllRegistriesParamsType defines parameters for GetAllRegistries.
+type GetAllRegistriesParamsType string
+
+// CreateRegistryJSONRequestBody defines body for CreateRegistry for application/json ContentType.
+type CreateRegistryJSONRequestBody RegistryRequest
+
+// ModifyRegistryJSONRequestBody defines body for ModifyRegistry for application/json ContentType.
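+//
+// A hedged Go sketch of assembling a modify body with the union helpers
+// defined below (the identifier value is hypothetical):
+//
+//	var cfg RegistryConfig
+//	_ = cfg.FromVirtualConfig(VirtualConfig{})
+//	body := ModifyRegistryJSONRequestBody{
+//		Identifier:  "my-registry",
+//		PackageType: PackageTypeDOCKER,
+//		Config:      &cfg,
+//	}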
+type ModifyRegistryJSONRequestBody RegistryRequest + +// UpdateArtifactLabelsJSONRequestBody defines body for UpdateArtifactLabels for application/json ContentType. +type UpdateArtifactLabelsJSONRequestBody ArtifactLabelRequest + +// AsVirtualConfig returns the union data inside the RegistryConfig as a VirtualConfig +func (t RegistryConfig) AsVirtualConfig() (VirtualConfig, error) { + var body VirtualConfig + err := json.Unmarshal(t.union, &body) + return body, err +} + +// FromVirtualConfig overwrites any union data inside the RegistryConfig as the provided VirtualConfig +func (t *RegistryConfig) FromVirtualConfig(v VirtualConfig) error { + t.Type = "VIRTUAL" + + b, err := json.Marshal(v) + t.union = b + return err +} + +// MergeVirtualConfig performs a merge with any union data inside the RegistryConfig, using the provided VirtualConfig +func (t *RegistryConfig) MergeVirtualConfig(v VirtualConfig) error { + t.Type = "VIRTUAL" + + b, err := json.Marshal(v) + if err != nil { + return err + } + + merged, err := runtime.JSONMerge(t.union, b) + t.union = merged + return err +} + +// AsUpstreamConfig returns the union data inside the RegistryConfig as a UpstreamConfig +func (t RegistryConfig) AsUpstreamConfig() (UpstreamConfig, error) { + var body UpstreamConfig + err := json.Unmarshal(t.union, &body) + return body, err +} + +// FromUpstreamConfig overwrites any union data inside the RegistryConfig as the provided UpstreamConfig +func (t *RegistryConfig) FromUpstreamConfig(v UpstreamConfig) error { + t.Type = "UPSTREAM" + + b, err := json.Marshal(v) + t.union = b + return err +} + +// MergeUpstreamConfig performs a merge with any union data inside the RegistryConfig, using the provided UpstreamConfig +func (t *RegistryConfig) MergeUpstreamConfig(v UpstreamConfig) error { + t.Type = "UPSTREAM" + + b, err := json.Marshal(v) + if err != nil { + return err + } + + merged, err := runtime.JSONMerge(t.union, b) + t.union = merged + return err +} + +func (t RegistryConfig) Discriminator() (string, error) { + var discriminator struct { + Discriminator string `json:"type"` + } + err := json.Unmarshal(t.union, &discriminator) + return discriminator.Discriminator, err +} + +func (t RegistryConfig) ValueByDiscriminator() (interface{}, error) { + discriminator, err := t.Discriminator() + if err != nil { + return nil, err + } + switch discriminator { + case "UPSTREAM": + return t.AsUpstreamConfig() + case "VIRTUAL": + return t.AsVirtualConfig() + default: + return nil, errors.New("unknown discriminator value: " + discriminator) + } +} + +func (t RegistryConfig) MarshalJSON() ([]byte, error) { + b, err := t.union.MarshalJSON() + if err != nil { + return nil, err + } + object := make(map[string]json.RawMessage) + if t.union != nil { + err = json.Unmarshal(b, &object) + if err != nil { + return nil, err + } + } + + object["type"], err = json.Marshal(t.Type) + if err != nil { + return nil, fmt.Errorf("error marshaling 'type': %w", err) + } + + b, err = json.Marshal(object) + return b, err +} + +func (t *RegistryConfig) UnmarshalJSON(b []byte) error { + err := t.union.UnmarshalJSON(b) + if err != nil { + return err + } + object := make(map[string]json.RawMessage) + err = json.Unmarshal(b, &object) + if err != nil { + return err + } + + if raw, found := object["type"]; found { + err = json.Unmarshal(raw, &t.Type) + if err != nil { + return fmt.Errorf("error reading 'type': %w", err) + } + } + + return err +} + +// AsUserPassword returns the union data inside the UpstreamConfig_Auth as a UserPassword +func (t 
UpstreamConfig_Auth) AsUserPassword() (UserPassword, error) { + var body UserPassword + err := json.Unmarshal(t.union, &body) + return body, err +} + +// FromUserPassword overwrites any union data inside the UpstreamConfig_Auth as the provided UserPassword +func (t *UpstreamConfig_Auth) FromUserPassword(v UserPassword) error { + b, err := json.Marshal(v) + t.union = b + return err +} + +// MergeUserPassword performs a merge with any union data inside the UpstreamConfig_Auth, using the provided UserPassword +func (t *UpstreamConfig_Auth) MergeUserPassword(v UserPassword) error { + b, err := json.Marshal(v) + if err != nil { + return err + } + + merged, err := runtime.JSONMerge(t.union, b) + t.union = merged + return err +} + +// AsAnonymous returns the union data inside the UpstreamConfig_Auth as a Anonymous +func (t UpstreamConfig_Auth) AsAnonymous() (Anonymous, error) { + var body Anonymous + err := json.Unmarshal(t.union, &body) + return body, err +} + +// FromAnonymous overwrites any union data inside the UpstreamConfig_Auth as the provided Anonymous +func (t *UpstreamConfig_Auth) FromAnonymous(v Anonymous) error { + b, err := json.Marshal(v) + t.union = b + return err +} + +// MergeAnonymous performs a merge with any union data inside the UpstreamConfig_Auth, using the provided Anonymous +func (t *UpstreamConfig_Auth) MergeAnonymous(v Anonymous) error { + b, err := json.Marshal(v) + if err != nil { + return err + } + + merged, err := runtime.JSONMerge(t.union, b) + t.union = merged + return err +} + +func (t UpstreamConfig_Auth) MarshalJSON() ([]byte, error) { + b, err := t.union.MarshalJSON() + return b, err +} + +func (t *UpstreamConfig_Auth) UnmarshalJSON(b []byte) error { + err := t.union.UnmarshalJSON(b) + return err +} diff --git a/registry/app/api/router/harness/route.go b/registry/app/api/router/harness/route.go new file mode 100644 index 000000000..7000e4bd4 --- /dev/null +++ b/registry/app/api/router/harness/route.go @@ -0,0 +1,76 @@ +// Copyright 2023 Harness, Inc. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+ +package harness + +import ( + "net/http" + + middlewareauthn "github.com/harness/gitness/app/api/middleware/authn" + "github.com/harness/gitness/app/auth/authn" + "github.com/harness/gitness/app/auth/authz" + corestore "github.com/harness/gitness/app/store" + urlprovider "github.com/harness/gitness/app/url" + "github.com/harness/gitness/audit" + "github.com/harness/gitness/registry/app/api/controller/metadata" + "github.com/harness/gitness/registry/app/api/middleware" + "github.com/harness/gitness/registry/app/api/openapi/contracts/artifact" + storagedriver "github.com/harness/gitness/registry/app/driver" + "github.com/harness/gitness/registry/app/store" + "github.com/harness/gitness/store/database/dbtx" + + "github.com/go-chi/chi/v5" +) + +type APIHandler interface { + http.Handler +} + +func NewAPIHandler( + repoDao store.RegistryRepository, + upstreamproxyDao store.UpstreamProxyConfigRepository, + tagDao store.TagRepository, + manifestDao store.ManifestRepository, + cleanupPolicyDao store.CleanupPolicyRepository, + artifactDao store.ArtifactRepository, + driver storagedriver.StorageDriver, + baseURL string, + spaceStore corestore.SpaceStore, + tx dbtx.Transactor, + authenticator authn.Authenticator, + urlProvider urlprovider.Provider, + authorizer authz.Authorizer, + auditService audit.Service, +) APIHandler { + r := chi.NewRouter() + r.Use(audit.Middleware()) + r.Use(middlewareauthn.Attempt(authenticator)) + r.Use(middleware.CheckAuth()) + apiController := metadata.NewAPIController( + repoDao, + upstreamproxyDao, + tagDao, + manifestDao, + cleanupPolicyDao, + artifactDao, + driver, + spaceStore, + tx, + urlProvider, + authorizer, + auditService, + ) + handler := artifact.NewStrictHandler(apiController, []artifact.StrictMiddlewareFunc{}) + return artifact.HandlerFromMuxWithBaseURL(handler, r, baseURL) +} diff --git a/registry/app/api/router/oci/route.go b/registry/app/api/router/oci/route.go new file mode 100644 index 000000000..7f9402cc9 --- /dev/null +++ b/registry/app/api/router/oci/route.go @@ -0,0 +1,151 @@ +// Copyright 2023 Harness, Inc. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package oci + +import ( + "net/http" + "strings" + + middlewareauthn "github.com/harness/gitness/app/api/middleware/authn" + "github.com/harness/gitness/registry/app/api/handler/oci" + "github.com/harness/gitness/registry/app/api/middleware" + "github.com/harness/gitness/registry/app/common" + + "github.com/go-chi/chi/v5" + "github.com/rs/zerolog/log" +) + +type RouteType string + +const ( + Manifests RouteType = "manifests" // /v2/:registry/:image/manifests/:reference. + Blobs RouteType = "blobs" // /v2/:registry/:image/blobs/:digest. + BlobsUploadsSession RouteType = "blob-uploads-session" // /v2/:registry/:image/blobs/uploads/:session_id. + Tags RouteType = "tags" // /v2/:registry/:image/tags/list. + Referrers RouteType = "referrers" // /v2/:registry/:image/referrers/:digest. + Invalid RouteType = "invalid" // Invalid route. + // Add other route types here. 
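+
+	// Illustrative classifications by GetRouteTypeV2 below (the registry and
+	// image names are hypothetical):
+	//   /v2/myreg/alpine/manifests/latest  -> Manifests
+	//   /v2/myreg/alpine/blobs/sha256:abc  -> Blobs
+	//   /v2/myreg/alpine/blobs/uploads     -> BlobsUploadsSession
+	//   /v2/myreg/alpine/tags/list         -> Tags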
+) + +func GetRouteTypeV2(url string) RouteType { + url = strings.Trim(url, "/") + segments := strings.Split(url, "/") + if len(segments) < 4 { + return Invalid + } + + typ := segments[len(segments)-2] + + switch typ { + case "manifests": + return Manifests + case "blobs": + if segments[len(segments)-1] == "uploads" { + return BlobsUploadsSession + } + return Blobs + case "uploads": + return BlobsUploadsSession + case "tags": + return Tags + case "referrers": + return Referrers + } + return Invalid +} + +type HandlerBlock struct { + Handler2 http.HandlerFunc + RemoteSupport bool +} + +func NewHandlerBlock2(h2 http.HandlerFunc, remoteSupport bool) HandlerBlock { + return HandlerBlock{ + Handler2: h2, + RemoteSupport: remoteSupport, + } +} + +type RegistryOCIHandler interface { + http.Handler +} + +func NewOCIHandler(handlerV2 *oci.Handler) RegistryOCIHandler { + r := chi.NewRouter() + + var routeHandlers = map[RouteType]map[string]HandlerBlock{ + Manifests: { + http.MethodGet: NewHandlerBlock2(handlerV2.GetManifest, true), + http.MethodHead: NewHandlerBlock2(handlerV2.HeadManifest, true), + http.MethodPut: NewHandlerBlock2(handlerV2.PutManifest, false), + http.MethodDelete: NewHandlerBlock2(handlerV2.DeleteManifest, false), + }, + Blobs: { + http.MethodGet: NewHandlerBlock2(handlerV2.GetBlob, true), + http.MethodHead: NewHandlerBlock2(handlerV2.HeadBlob, false), + http.MethodDelete: NewHandlerBlock2(handlerV2.DeleteBlob, false), + }, + BlobsUploadsSession: { + http.MethodGet: NewHandlerBlock2(handlerV2.GetUploadBlobStatus, false), + http.MethodPatch: NewHandlerBlock2(handlerV2.PatchBlobUpload, false), + http.MethodPut: NewHandlerBlock2(handlerV2.CompleteBlobUpload, false), + http.MethodDelete: NewHandlerBlock2(handlerV2.CancelBlobUpload, false), + http.MethodPost: NewHandlerBlock2(handlerV2.InitiateUploadBlob, false), + }, + Tags: { + http.MethodGet: NewHandlerBlock2(handlerV2.GetTags, false), + }, + Referrers: { + http.MethodGet: NewHandlerBlock2(handlerV2.GetReferrers, false), + }, + } + r.Route("/v2", func(r chi.Router) { + r.Use(middlewareauthn.Attempt(handlerV2.Authenticator)) + r.Get("/token", func(w http.ResponseWriter, req *http.Request) { + handlerV2.GetToken(w, req) + }) + + r.With(middleware.OciCheckAuth(common.GenerateOciTokenURL(handlerV2.URLProvider.RegistryURL()))). + Get("/", func(w http.ResponseWriter, req *http.Request) { + handlerV2.APIBase(w, req) + }) + r.Route("/{registryIdentifier}", func(r chi.Router) { + r.Use(middleware.OciCheckAuth(common.GenerateOciTokenURL(handlerV2.URLProvider.RegistryURL()))) + r.Use(middleware.BlockNonOciSourceToken(handlerV2.URLProvider.RegistryURL())) + r.Handle("/*", http.HandlerFunc(func(w http.ResponseWriter, req *http.Request) { + path := req.URL.Path + methodType := req.Method + + requestType := GetRouteTypeV2(path) + + if _, ok := routeHandlers[requestType]; ok { + if h, ok2 := routeHandlers[requestType][methodType]; ok2 { + h.Handler2(w, req) + return + } + } + + w.WriteHeader(http.StatusNotFound) + _, err := w.Write([]byte("Invalid route")) + if err != nil { + log.Error().Err(err).Msg("Failed to write response") + return + } + })) + }) + }) + + return r +} diff --git a/registry/app/api/router/registry_router.go b/registry/app/api/router/registry_router.go new file mode 100644 index 000000000..190d1ae8e --- /dev/null +++ b/registry/app/api/router/registry_router.go @@ -0,0 +1,51 @@ +// Copyright 2023 Harness, Inc. 
+// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package router + +import ( + "net/http" + "strings" +) + +const RegistryMount = "/api/v1/registry" +const APIMount = "/api" + +type RegistryRouter struct { + handler http.Handler +} + +func NewRegistryRouter(handler http.Handler) *RegistryRouter { + return &RegistryRouter{handler: handler} +} + +func (r *RegistryRouter) Handle(w http.ResponseWriter, req *http.Request) { + r.handler.ServeHTTP(w, req) +} + +func (r *RegistryRouter) IsEligibleTraffic(req *http.Request) bool { + if strings.HasPrefix(req.URL.Path, RegistryMount) || strings.HasPrefix(req.URL.Path, "/v2/") || + strings.HasPrefix(req.URL.Path, "/registry/") || + (strings.HasPrefix(req.URL.Path, APIMount+"/v1/spaces/") && + (strings.HasSuffix(req.URL.Path, "/artifacts") || + strings.HasSuffix(req.URL.Path, "/registries"))) { + return true + } + + return false +} + +func (r *RegistryRouter) Name() string { + return "registry" +} diff --git a/registry/app/api/router/router.go b/registry/app/api/router/router.go new file mode 100644 index 000000000..684fc000e --- /dev/null +++ b/registry/app/api/router/router.go @@ -0,0 +1,54 @@ +// Copyright 2023 Harness, Inc. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package router + +import ( + "fmt" + "net/http" + + "github.com/harness/gitness/app/api/middleware/address" + "github.com/harness/gitness/app/api/middleware/logging" + "github.com/harness/gitness/registry/app/api/handler/swagger" + "github.com/harness/gitness/registry/app/api/router/harness" + "github.com/harness/gitness/registry/app/api/router/oci" + + "github.com/go-chi/chi/v5" + "github.com/rs/zerolog/hlog" +) + +type AppRouter interface { + http.Handler +} + +func GetAppRouter( + ociHandler oci.RegistryOCIHandler, + appHandler harness.APIHandler, + baseURL string, +) AppRouter { + r := chi.NewRouter() + r.Use(hlog.URLHandler("http.url")) + r.Use(hlog.MethodHandler("http.method")) + r.Use(logging.HLogRequestIDHandler()) + r.Use(logging.HLogAccessLogHandler()) + r.Use(address.Handler("", "")) + + r.Group(func(r chi.Router) { + r.Handle(fmt.Sprintf("%s/*", baseURL), appHandler) + r.Handle("/v2/*", ociHandler) + + r.Handle("/registry/swagger*", swagger.GetSwaggerHandler("/registry")) + }) + return r +} diff --git a/registry/app/api/router/wire.go b/registry/app/api/router/wire.go new file mode 100644 index 000000000..af6253a69 --- /dev/null +++ b/registry/app/api/router/wire.go @@ -0,0 +1,78 @@ +// Copyright 2023 Harness, Inc. 
+// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package router + +import ( + "github.com/harness/gitness/app/auth/authn" + "github.com/harness/gitness/app/auth/authz" + "github.com/harness/gitness/app/config" + corestore "github.com/harness/gitness/app/store" + urlprovider "github.com/harness/gitness/app/url" + "github.com/harness/gitness/audit" + hoci "github.com/harness/gitness/registry/app/api/handler/oci" + "github.com/harness/gitness/registry/app/api/router/harness" + "github.com/harness/gitness/registry/app/api/router/oci" + storagedriver "github.com/harness/gitness/registry/app/driver" + "github.com/harness/gitness/registry/app/store" + "github.com/harness/gitness/store/database/dbtx" + + "github.com/google/wire" +) + +func AppRouterProvider( + ocir oci.RegistryOCIHandler, + appHandler harness.APIHandler, +) AppRouter { + return GetAppRouter(ocir, appHandler, config.APIURL) +} + +func APIHandlerProvider( + repoDao store.RegistryRepository, + upstreamproxyDao store.UpstreamProxyConfigRepository, + tagDao store.TagRepository, + manifestDao store.ManifestRepository, + cleanupPolicyDao store.CleanupPolicyRepository, + artifactDao store.ArtifactRepository, + driver storagedriver.StorageDriver, + spaceStore corestore.SpaceStore, + tx dbtx.Transactor, + authenticator authn.Authenticator, + urlProvider urlprovider.Provider, + authorizer authz.Authorizer, + auditService audit.Service, +) harness.APIHandler { + return harness.NewAPIHandler( + repoDao, + upstreamproxyDao, + tagDao, + manifestDao, + cleanupPolicyDao, + artifactDao, + driver, + config.APIURL, + spaceStore, + tx, + authenticator, + urlProvider, + authorizer, + auditService, + ) +} + +func OCIHandlerProvider(handlerV2 *hoci.Handler) oci.RegistryOCIHandler { + return oci.NewOCIHandler(handlerV2) +} + +var WireSet = wire.NewSet(APIHandlerProvider, OCIHandlerProvider, AppRouterProvider) diff --git a/registry/app/api/wire.go b/registry/app/api/wire.go new file mode 100644 index 000000000..27342ee56 --- /dev/null +++ b/registry/app/api/wire.go @@ -0,0 +1,85 @@ +// Copyright 2023 Harness, Inc. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+
+package api
+
+import (
+	usercontroller "github.com/harness/gitness/app/api/controller/user"
+	"github.com/harness/gitness/app/auth/authn"
+	"github.com/harness/gitness/app/auth/authz"
+	corestore "github.com/harness/gitness/app/store"
+	urlprovider "github.com/harness/gitness/app/url"
+	ocihandler "github.com/harness/gitness/registry/app/api/handler/oci"
+	"github.com/harness/gitness/registry/app/api/router"
+	storagedriver "github.com/harness/gitness/registry/app/driver"
+	"github.com/harness/gitness/registry/app/driver/factory"
+	"github.com/harness/gitness/registry/app/driver/filesystem"
+	"github.com/harness/gitness/registry/app/driver/s3-aws"
+	"github.com/harness/gitness/registry/app/pkg"
+	"github.com/harness/gitness/registry/app/pkg/docker"
+	"github.com/harness/gitness/registry/app/store/database"
+	"github.com/harness/gitness/registry/config"
+	"github.com/harness/gitness/types"
+
+	"github.com/google/wire"
+	"github.com/rs/zerolog/log"
+)
+
+type RegistryApp struct {
+	Config *types.Config
+
+	AppRouter router.AppRouter
+}
+
+func BlobStorageProvider(c *types.Config) (storagedriver.StorageDriver, error) {
+	var d storagedriver.StorageDriver
+	var err error
+
+	if c.Registry.Storage.StorageType == "filesystem" {
+		filesystem.Register()
+		d, err = factory.Create("filesystem", config.GetFilesystemParams(c))
+		if err != nil {
+			log.Error().Stack().Err(err).Msg("failed to init filesystem blob storage")
+			panic(err)
+		}
+	} else {
+		s3.Register()
+		d, err = factory.Create("s3aws", config.GetS3StorageParameters(c))
+		if err != nil {
+			log.Error().Stack().Err(err).Msg("failed to init s3 blob storage")
+			panic(err)
+		}
+	}
+	return d, err
+}
+
+func NewHandlerProvider(controller *docker.Controller, spaceStore corestore.SpaceStore,
+	tokenStore corestore.TokenStore, userCtrl *usercontroller.Controller, authenticator authn.Authenticator,
+	urlProvider urlprovider.Provider, authorizer authz.Authorizer) *ocihandler.Handler {
+	return ocihandler.NewHandler(controller, spaceStore, tokenStore, userCtrl, authenticator, urlProvider, authorizer)
+}
+
+var WireSet = wire.NewSet(
+	BlobStorageProvider,
+	NewHandlerProvider,
+	database.WireSet,
+	pkg.WireSet,
+	docker.WireSet,
+	router.WireSet,
+)
+
+func Wire(_ *types.Config) (RegistryApp, error) {
+	wire.Build(WireSet, wire.Struct(new(RegistryApp), "*"))
+	return RegistryApp{}, nil
+}
diff --git a/registry/app/auth/auth.go b/registry/app/auth/auth.go
new file mode 100644
index 000000000..580283021
--- /dev/null
+++ b/registry/app/auth/auth.go
@@ -0,0 +1,169 @@
+// Copyright 2023 Harness, Inc.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package auth
+
+import (
+	"net/http"
+	"strings"
+)
+
+// AccessSet maps a typed, named resource to
+// a set of actions requested or authorized.
+type AccessSet map[Resource]ActionSet
+
+// NewAccessSet constructs an AccessSet from
+// a variable number of auth.Access items.
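+//
+// An illustrative sketch (the resource values are hypothetical):
+//
+//	res := Resource{Type: "repository", Space: "root", Name: "hello-world"}
+//	set := NewAccessSet(
+//		Access{Resource: res, Action: "pull"},
+//		Access{Resource: res, Action: "push"},
+//	)
+//	set.Contains(Access{Resource: res, Action: "pull"}) // true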
+func NewAccessSet(accessItems ...Access) AccessSet { + accessSet := make(AccessSet, len(accessItems)) + + for _, access := range accessItems { + resource := Resource{ + Type: access.Type, + Name: access.Name, + Space: access.Space, + } + + set, exists := accessSet[resource] + if !exists { + set = NewActionSet() + accessSet[resource] = set + } + + set.Add(access.Action) + } + + return accessSet +} + +// Contains returns whether or not the given access is in this accessSet. +func (s AccessSet) Contains(access Access) bool { + actionSet, ok := s[access.Resource] + if ok { + return actionSet.contains(access.Action) + } + + return false +} + +// ScopeParam returns a collection of scopes which can +// be used for a WWW-Authenticate challenge parameter. +func (s AccessSet) ScopeParam() string { + scopes := make([]string, 0, len(s)) + + for resource, actionSet := range s { + actions := strings.Join(actionSet.keys(), ",") + resourceName := strings.Join([]string{resource.Space, resource.Name}, "/") + scopes = append(scopes, strings.Join([]string{resource.Type, resourceName, actions}, ":")) + } + + return strings.Join(scopes, " ") +} + +// Resource describes a resource by type and name. +type Resource struct { + Type string + Name string + Space string +} + +// Access describes a specific action that is +// requested or allowed for a given resource. +type Access struct { + Resource + Action string +} + +func AppendAccess(records []Access, method string, rootIdentifier string, repo string) []Access { + resource := Resource{ + Type: "repository", + Name: repo, + Space: rootIdentifier, + } + + switch method { + case http.MethodGet, http.MethodHead: + records = append(records, + Access{ + Resource: resource, + Action: "pull", + }) + case http.MethodPost, http.MethodPut, http.MethodPatch: + records = append(records, + Access{ + Resource: resource, + Action: "pull", + }, + Access{ + Resource: resource, + Action: "push", + }) + case http.MethodDelete: + records = append(records, + Access{ + Resource: resource, + Action: "delete", + }) + } + return records +} + +// ActionSet is a special type of stringSet. +type ActionSet struct { + stringSet +} + +func NewActionSet(actions ...string) ActionSet { + return ActionSet{newStringSet(actions...)} +} + +// Contains calls StringSet.Contains() for +// either "*" or the given action string. +func (s ActionSet) Contains(action string) bool { + return s.stringSet.contains("*") || s.stringSet.contains(action) +} + +// StringSet is a useful type for looking up strings. +type stringSet map[string]struct{} + +// NewStringSet creates a new StringSet with the given strings. +func newStringSet(keys ...string) stringSet { + ss := make(stringSet, len(keys)) + ss.Add(keys...) + return ss +} + +// Add inserts the given keys into this StringSet. +func (ss stringSet) Add(keys ...string) { + for _, key := range keys { + ss[key] = struct{}{} + } +} + +// Contains returns whether the given key is in this StringSet. +func (ss stringSet) contains(key string) bool { + _, ok := ss[key] + return ok +} + +// Keys returns a slice of all keys in this StringSet. 
+func (ss stringSet) keys() []string {
+	keys := make([]string, 0, len(ss))
+
+	for key := range ss {
+		keys = append(keys, key)
+	}
+
+	return keys
+}
diff --git a/registry/app/common/http/modifier/modifier.go b/registry/app/common/http/modifier/modifier.go
new file mode 100644
index 000000000..f67549882
--- /dev/null
+++ b/registry/app/common/http/modifier/modifier.go
@@ -0,0 +1,26 @@
+// Source: https://github.com/goharbor/harbor
+
+// Copyright 2016 Project Harbor Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package modifier
+
+import (
+	"net/http"
+)
+
+// Modifier modifies a request.
+type Modifier interface {
+	Modify(*http.Request) error
+}
diff --git a/registry/app/common/http/tls.go b/registry/app/common/http/tls.go
new file mode 100644
index 000000000..559d58d70
--- /dev/null
+++ b/registry/app/common/http/tls.go
@@ -0,0 +1,84 @@
+// Source: https://github.com/goharbor/harbor
+
+// Copyright 2016 Project Harbor Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package http
+
+import (
+	"crypto/tls"
+	"fmt"
+	"os"
+	"strings"
+)
+
+const (
+	// Internal TLS ENV.
+	internalTLSEnable        = "GITNESS_INTERNAL_TLS_ENABLED"
+	internalVerifyClientCert = "GITNESS_INTERNAL_VERIFY_CLIENT_CERT"
+	internalTLSKeyPath       = "GITNESS_INTERNAL_TLS_KEY_PATH"
+	internalTLSCertPath      = "GITNESS_INTERNAL_TLS_CERT_PATH"
+)
+
+// InternalTLSEnabled returns true if internal TLS is enabled.
+func InternalTLSEnabled() bool {
+	return strings.ToLower(os.Getenv(internalTLSEnable)) == "true"
+}
+
+// InternalEnableVerifyClientCert returns true if mTLS is enabled.
+func InternalEnableVerifyClientCert() bool {
+	return strings.ToLower(os.Getenv(internalVerifyClientCert)) == "true"
+}
+
+// GetInternalCertPair loads the internal cert and key pair from the environment.
+func GetInternalCertPair() (tls.Certificate, error) {
+	crtPath := os.Getenv(internalTLSCertPath)
+	keyPath := os.Getenv(internalTLSKeyPath)
+	return tls.LoadX509KeyPair(crtPath, keyPath)
+}
+
+// GetInternalTLSConfig returns a tls.Config for internal HTTPS communication.
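+//
+// A typical caller feeds the config into its server setup; a sketch
+// (the server value is illustrative):
+//
+//	cfg, err := GetInternalTLSConfig()
+//	if err != nil {
+//		return err
+//	}
+//	srv.TLSConfig = cfg // srv being the caller's *http.Server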
+func GetInternalTLSConfig() (*tls.Config, error) {
+	// generate key pair
+	cert, err := GetInternalCertPair()
+	if err != nil {
+		return nil, fmt.Errorf("internal TLS enabled but can't get cert file: %w", err)
+	}
+
+	return &tls.Config{
+		Certificates: []tls.Certificate{cert},
+		MinVersion:   tls.VersionTLS12,
+	}, nil
+}
+
+// NewServerTLSConfig returns a modern tls config,
+// refer to https://blog.cloudflare.com/exposing-go-on-the-internet/
+func NewServerTLSConfig() *tls.Config {
+	return &tls.Config{
+		PreferServerCipherSuites: true,
+		CurvePreferences: []tls.CurveID{
+			tls.CurveP256,
+			tls.X25519,
+		},
+		MinVersion: tls.VersionTLS12,
+		CipherSuites: []uint16{
+			tls.TLS_ECDHE_ECDSA_WITH_AES_256_GCM_SHA384,
+			tls.TLS_ECDHE_ECDSA_WITH_CHACHA20_POLY1305,
+			tls.TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384,
+			tls.TLS_ECDHE_RSA_WITH_CHACHA20_POLY1305,
+			tls.TLS_ECDHE_ECDSA_WITH_AES_128_GCM_SHA256,
+			tls.TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256,
+		},
+	}
+}
diff --git a/registry/app/common/http/transport.go b/registry/app/common/http/transport.go
new file mode 100644
index 000000000..40dfe94b9
--- /dev/null
+++ b/registry/app/common/http/transport.go
@@ -0,0 +1,137 @@
+// Source: https://github.com/goharbor/harbor
+
+// Copyright 2016 Project Harbor Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package http
+
+import (
+	"crypto/tls"
+	"net"
+	"net/http"
+	"time"
+)
+
+const (
+	// InsecureTransport used to get the insecure http Transport.
+	InsecureTransport = iota
+	// SecureTransport used to get the external secure http Transport.
+	SecureTransport
)
+
+var (
+	secureHTTPTransport   http.RoundTripper
+	insecureHTTPTransport http.RoundTripper
+)
+
+func init() {
+	insecureHTTPTransport = NewTransport(WithInsecureSkipVerify(true))
+	if InternalTLSEnabled() {
+		secureHTTPTransport = NewTransport(WithInternalTLSConfig())
+	} else {
+		secureHTTPTransport = NewTransport()
+	}
+}
+
+// Use this instead of the library's default transport: the default sets
+// ForceAttemptHTTP2 to true, and that option, introduced in Go 1.13, can cause
+// HTTPS requests to hang forever in a replication environment.
+func newDefaultTransport() *http.Transport {
+	return &http.Transport{
+		Proxy: http.ProxyFromEnvironment,
+		DialContext: (&net.Dialer{
+			Timeout:   30 * time.Second,
+			KeepAlive: 30 * time.Second,
+			DualStack: true,
+		}).DialContext,
+		TLSClientConfig: &tls.Config{
+			MinVersion: tls.VersionTLS12,
+		},
+		MaxIdleConns:          100,
+		IdleConnTimeout:       90 * time.Second,
+		TLSHandshakeTimeout:   10 * time.Second,
+		ExpectContinueTimeout: 1 * time.Second,
+	}
+}
+
+// WithInternalTLSConfig returns a TransportOption that configures the transport to use the internal TLS configuration.
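+//
+// Options compose over the default transport; a sketch:
+//
+//	tr := NewTransport(WithInternalTLSConfig(), WithMaxIdleConns(50))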
+func WithInternalTLSConfig() func(*http.Transport) { + return func(tr *http.Transport) { + tlsConfig, err := GetInternalTLSConfig() + if err != nil { + panic(err) + } + tr.TLSClientConfig = tlsConfig + } +} + +// WithInsecureSkipVerify returns a TransportOption that configures the +// transport to skip verification of the server's certificate. +func WithInsecureSkipVerify(skipVerify bool) func(*http.Transport) { + return func(tr *http.Transport) { + tr.TLSClientConfig.InsecureSkipVerify = skipVerify + } +} + +// WithMaxIdleConns returns a TransportOption that configures the +// transport to use the specified number of idle connections per host. +func WithMaxIdleConns(maxIdleConns int) func(*http.Transport) { + return func(tr *http.Transport) { + tr.MaxIdleConns = maxIdleConns + } +} + +// WithIdleconnectionTimeout returns a TransportOption that configures +// the transport to use the specified idle connection timeout. +func WithIdleconnectionTimeout(idleConnectionTimeout time.Duration) func(*http.Transport) { + return func(tr *http.Transport) { + tr.IdleConnTimeout = idleConnectionTimeout + } +} + +// NewTransport returns a new http.Transport with the specified options. +func NewTransport(opts ...func(*http.Transport)) http.RoundTripper { + tr := newDefaultTransport() + for _, opt := range opts { + opt(tr) + } + return tr +} + +// TransportConfig is the configuration for http transport. +type TransportConfig struct { + Insecure bool +} + +// TransportOption is the option for http transport. +type TransportOption func(*TransportConfig) + +// WithInsecure returns a TransportOption that configures the +// transport to skip verification of the server's certificate. +func WithInsecure(skipVerify bool) TransportOption { + return func(cfg *TransportConfig) { + cfg.Insecure = skipVerify + } +} + +// GetHTTPTransport returns HttpTransport based on insecure configuration. +func GetHTTPTransport(opts ...TransportOption) http.RoundTripper { + cfg := &TransportConfig{} + for _, opt := range opts { + opt(cfg) + } + if cfg.Insecure { + return insecureHTTPTransport + } + return secureHTTPTransport +} diff --git a/registry/app/common/http/transport_test.go b/registry/app/common/http/transport_test.go new file mode 100644 index 000000000..ae60dfed8 --- /dev/null +++ b/registry/app/common/http/transport_test.go @@ -0,0 +1,30 @@ +// Source: https://github.com/goharbor/harbor + +// Copyright 2016 Project Harbor Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
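+
+// A caller-side sketch of the API exercised below; the client construction
+// is illustrative:
+//
+//	client := &http.Client{Transport: GetHTTPTransport(WithInsecure(true))}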
+
+package http
+
+import (
+	"testing"
+
+	"github.com/stretchr/testify/assert"
+)
+
+func TestGetHTTPTransport(t *testing.T) {
+	transport := GetHTTPTransport()
+	assert.Equal(t, secureHTTPTransport, transport, "Transport should be secure")
+	transport = GetHTTPTransport(WithInsecure(true))
+	assert.Equal(t, insecureHTTPTransport, transport, "Transport should be insecure")
+}
diff --git a/registry/app/common/lib/authorizer.go b/registry/app/common/lib/authorizer.go
new file mode 100644
index 000000000..0a5809ef1
--- /dev/null
+++ b/registry/app/common/lib/authorizer.go
@@ -0,0 +1,24 @@
+// Source: https://github.com/goharbor/harbor
+
+// Copyright 2016 Project Harbor Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package lib
+
+import (
+	"github.com/harness/gitness/registry/app/common/http/modifier"
+)
+
+// Authorizer authorizes the request.
+type Authorizer modifier.Modifier
diff --git a/registry/app/common/lib/errors/const.go b/registry/app/common/lib/errors/const.go
new file mode 100644
index 000000000..d699ed025
--- /dev/null
+++ b/registry/app/common/lib/errors/const.go
@@ -0,0 +1,71 @@
+// Source: https://github.com/goharbor/harbor
+
+// Copyright 2016 Project Harbor Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package errors
+
+const (
+	// NotFoundCode is code for the error of no object found.
+	NotFoundCode = "NOT_FOUND"
+	// ConflictCode ...
+	ConflictCode = "CONFLICT"
+	// UnAuthorizedCode ...
+	UnAuthorizedCode = "UNAUTHORIZED"
+	// BadRequestCode ...
+	BadRequestCode = "BAD_REQUEST"
+	// ForbiddenCode ...
+	ForbiddenCode = "FORBIDDEN"
+	// MethodNotAllowedCode ...
+	MethodNotAllowedCode = "METHOD_NOT_ALLOWED"
+	// RateLimitCode ...
+	RateLimitCode = "TOO_MANY_REQUEST"
+	// PreconditionCode ...
+	PreconditionCode = "PRECONDITION"
+	// GeneralCode ...
+	GeneralCode = "UNKNOWN"
+	// DENIED is used by middleware (readonly, vul and content trust)
+	// and returned to the docker client to indicate that the request is denied.
+	DENIED = "DENIED"
+	// PROJECTPOLICYVIOLATION ...
+	PROJECTPOLICYVIOLATION = "PROJECTPOLICYVIOLATION"
+	// ViolateForeignKeyConstraintCode is the error code for violating foreign key constraint error.
+	ViolateForeignKeyConstraintCode = "VIOLATE_FOREIGN_KEY_CONSTRAINT"
+	// DIGESTINVALID ...
+	DIGESTINVALID = "DIGEST_INVALID"
+	// MANIFESTINVALID ...
+	MANIFESTINVALID = "MANIFEST_INVALID"
+	// UNSUPPORTED is for digest UNSUPPORTED error.
+	UNSUPPORTED = "UNSUPPORTED"
+)
+
+// NotFoundError is the error for the case of object not found.
+func NotFoundError(err error) *Error {
+	return New("resource not found").WithCode(NotFoundCode).WithCause(err)
+}
+
+// UnknownError ...
+func UnknownError(err error) *Error {
+	return New("unknown").WithCode(GeneralCode).WithCause(err)
+}
+
+// IsNotFoundErr returns true when the error is NotFoundError.
+func IsNotFoundErr(err error) bool {
+	return IsErr(err, NotFoundCode)
+}
+
+// IsRateLimitError checks whether the err chain contains a rate limit error.
+func IsRateLimitError(err error) bool {
+	return IsErr(err, RateLimitCode)
+}
diff --git a/registry/app/common/lib/errors/errors.go b/registry/app/common/lib/errors/errors.go
new file mode 100644
index 000000000..dc1559b79
--- /dev/null
+++ b/registry/app/common/lib/errors/errors.go
@@ -0,0 +1,207 @@
+// Source: https://github.com/goharbor/harbor
+
+// Copyright 2016 Project Harbor Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package errors
+
+import (
+	"encoding/json"
+	"errors"
+	"fmt"
+
+	"github.com/rs/zerolog/log"
+)
+
+var (
+	// As alias function of `errors.As`.
+	As = errors.As
+	// Is alias function of `errors.Is`.
+	Is = errors.Is
+)
+
+// Error ...
+type Error struct {
+	Cause   error  `json:"-"`
+	Code    string `json:"code"`
+	Message string `json:"message"`
+	Stack   *stack `json:"-"`
+}
+
+// Error returns a human-readable error. The output of error.Error() does not
+// contain the stack information; call error.StackTrace() if you need it.
+// The code is not included in the error output.
+func (e *Error) Error() string {
+	out := e.Message
+	if e.Cause != nil {
+		out = out + ": " + e.Cause.Error()
+	}
+	return out
+}
+
+// StackTrace ...
+func (e *Error) StackTrace() string {
+	return e.Stack.frames().format()
+}
+
+// MarshalJSON ...
+func (e *Error) MarshalJSON() ([]byte, error) {
+	return json.Marshal(
+		&struct {
+			Code    string `json:"code"`
+			Message string `json:"message"`
+		}{
+			Code:    e.Code,
+			Message: e.Error(),
+		},
+	)
+}
+
+// WithMessage ...
+func (e *Error) WithMessage(format string, v ...interface{}) *Error {
+	e.Message = fmt.Sprintf(format, v...)
+	return e
+}
+
+// WithCode ...
+func (e *Error) WithCode(code string) *Error {
+	e.Code = code
+	return e
+}
+
+// WithCause ...
+func (e *Error) WithCause(err error) *Error {
+	e.Cause = err
+	return e
+}
+
+// Unwrap ...
+func (e *Error) Unwrap() error { return e.Cause }
+
+// Errors ...
+type Errors []error
+
+var _ error = Errors{}
+
+// Error serializes the slice of errors as JSON.
+func (errs Errors) Error() string {
+	var tmpErrs struct {
+		Errors []Error `json:"errors,omitempty"`
+	}
+
+	for _, e := range errs {
+		var err *Error
+		ok := errors.As(e, &err)
+		if !ok {
+			err = UnknownError(e)
+		}
+		if err.Code == "" {
+			err.Code = GeneralCode
+		}
+
+		tmpErrs.Errors = append(tmpErrs.Errors, *err)
+	}
+
+	msg, err := json.Marshal(tmpErrs)
+	if err != nil {
+		log.Error().Stack().Err(err).Msg("")
+		return "{}"
+	}
+	return string(msg)
+}
+
+// Len returns the current number of errors.
+func (errs Errors) Len() int {
+	return len(errs)
+}
+
+// NewErrs ...
+func NewErrs(err error) Errors {
+	return Errors{err}
+}
+
+// New ...
+func New(in interface{}) *Error {
+	var err error
+	switch in := in.(type) {
+	case error:
+		err = in
+	default:
+		err = fmt.Errorf("%v", in)
+	}
+
+	return &Error{
+		Message: err.Error(),
+		Stack:   newStack(),
+	}
+}
+
+// Wrap ...
+func Wrap(err error, message string) *Error {
+	if err == nil {
+		return nil
+	}
+	e := &Error{
+		Cause:   err,
+		Message: message,
+		Stack:   newStack(),
+	}
+	return e
+}
+
+// Wrapf ...
+func Wrapf(err error, format string, args ...interface{}) *Error {
+	if err == nil {
+		return nil
+	}
+	e := &Error{
+		Cause:   err,
+		Message: fmt.Sprintf(format, args...),
+		Stack:   newStack(),
+	}
+	return e
+}
+
+// Errorf ...
+func Errorf(format string, args ...interface{}) *Error {
+	return &Error{
+		Message: fmt.Sprintf(format, args...),
+		Stack:   newStack(),
+	}
+}
+
+// IsErr checks whether the err chain contains an error matching the code.
+func IsErr(err error, code string) bool {
+	var e *Error
+	if As(err, &e) {
+		return e.Code == code
+	}
+	return false
+}
+
+// ErrCode returns the code of err.
+func ErrCode(err error) string {
+	if err == nil {
+		return ""
+	}
+
+	var e *Error
+	if ok := As(err, &e); ok && e.Code != "" {
+		return e.Code
+	} else if ok && e.Cause != nil {
+		return ErrCode(e.Cause)
+	}
+
+	return GeneralCode
+}
diff --git a/registry/app/common/lib/errors/stack.go b/registry/app/common/lib/errors/stack.go
new file mode 100644
index 000000000..d30770ff6
--- /dev/null
+++ b/registry/app/common/lib/errors/stack.go
@@ -0,0 +1,63 @@
+// Source: https://github.com/goharbor/harbor
+
+// Copyright 2016 Project Harbor Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package errors
+
+import (
+	"fmt"
+	"runtime"
+	"strings"
+)
+
+const maxDepth = 50
+
+type stack []uintptr
+
+func (s *stack) frames() StackFrames {
+	var stackFrames StackFrames
+	frames := runtime.CallersFrames(*s)
+	for {
+		frame, next := frames.Next()
+		// filter out runtime
+		if !strings.Contains(frame.File, "runtime/") {
+			stackFrames = append(stackFrames, frame)
+		}
+		if !next {
+			break
+		}
+	}
+	return stackFrames
+}
+
+// newStack ...
+func newStack() *stack {
+	var pcs [maxDepth]uintptr
+	n := runtime.Callers(3, pcs[:])
+	var st stack = pcs[0:n]
+	return &st
+}
+
+// StackFrames ...
+type StackFrames []runtime.Frame
+
+// Output: <file>:<line>, <function>.
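+// For example (the path is illustrative):
+//
+//	/src/registry/app/common/lib/errors/errors.go:120, errors.New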
+func (frames StackFrames) format() string {
+	var msg string
+	for _, frame := range frames {
+		msg += fmt.Sprintf("\n%v:%v, %v", frame.File, frame.Line, frame.Function)
+	}
+	return msg
+}
diff --git a/registry/app/common/lib/errors/stack_test.go b/registry/app/common/lib/errors/stack_test.go
new file mode 100644
index 000000000..2847e3bc6
--- /dev/null
+++ b/registry/app/common/lib/errors/stack_test.go
@@ -0,0 +1,47 @@
+// Source: https://github.com/goharbor/harbor
+
+// Copyright 2016 Project Harbor Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package errors
+
+import (
+	"testing"
+
+	"github.com/rs/zerolog/log"
+	"github.com/stretchr/testify/suite"
+)
+
+type stackTestSuite struct {
+	suite.Suite
+}
+
+func (c *stackTestSuite) SetupTest() {}
+
+func (c *stackTestSuite) TestFrame() {
+	stack := newStack()
+	frames := stack.frames()
+	c.Equal(len(frames), 4)
+	log.Info().Msg(frames.format())
+}
+
+func (c *stackTestSuite) TestFormat() {
+	stack := newStack()
+	frames := stack.frames()
+	c.Contains(frames[len(frames)-1].Function, "testing.tRunner")
+}
+
+func TestStackTestSuite(t *testing.T) {
+	suite.Run(t, &stackTestSuite{})
+}
diff --git a/registry/app/common/lib/link.go b/registry/app/common/lib/link.go
new file mode 100644
index 000000000..5b1920186
--- /dev/null
+++ b/registry/app/common/lib/link.go
@@ -0,0 +1,92 @@
+// Source: https://github.com/goharbor/harbor
+
+// Copyright 2016 Project Harbor Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package lib
+
+import (
+	"fmt"
+	"strings"
+)
+
+// Link defines the model that describes the HTTP link header.
+type Link struct {
+	URL   string
+	Rel   string
+	Attrs map[string]string
+}
+
+// String returns the string representation of a link.
+func (l *Link) String() string {
+	s := fmt.Sprintf("<%s>", l.URL)
+	if len(l.Rel) > 0 {
+		s = fmt.Sprintf(`%s; rel="%s"`, s, l.Rel)
+	}
+	for key, value := range l.Attrs {
+		s = fmt.Sprintf(`%s; %s="%s"`, s, key, value)
+	}
+	return s
+}
+
+// Links is a link object array.
+type Links []*Link
+
+// String returns the string representation of links.
+func (l Links) String() string {
+	var strs []string
+	for _, link := range l {
+		strs = append(strs, link.String())
+	}
+	return strings.Join(strs, " , ")
+}
+
+// ParseLinks parses the link header into Links,
+// e.g. <http://example.com/TheBook/chapter2>; rel="previous";
+// title="previous chapter" , <http://example.com/TheBook/chapter4>; rel="next"; title="next chapter".
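+//
+// A sketch of the parsed result for the example above:
+//
+//	links := ParseLinks(`<http://example.com/TheBook/chapter2>; rel="previous"`)
+//	links[0].URL == "http://example.com/TheBook/chapter2"
+//	links[0].Rel == "previous"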
+func ParseLinks(str string) Links { + var links Links + for _, lk := range strings.Split(str, ",") { + link := &Link{ + Attrs: map[string]string{}, + } + for _, attr := range strings.Split(lk, ";") { + attr = strings.TrimSpace(attr) + if len(attr) == 0 { + continue + } + if attr[0] == '<' && attr[len(attr)-1] == '>' { + link.URL = attr[1 : len(attr)-1] + continue + } + + parts := strings.SplitN(attr, "=", 2) + key := parts[0] + value := "" + if len(parts) == 2 { + value = strings.Trim(parts[1], `"`) + } + if key == "rel" { + link.Rel = value + } else { + link.Attrs[key] = value + } + } + if len(link.URL) == 0 { + continue + } + links = append(links, link) + } + return links +} diff --git a/registry/app/common/url_utils.go b/registry/app/common/url_utils.go new file mode 100644 index 000000000..aa9046803 --- /dev/null +++ b/registry/app/common/url_utils.go @@ -0,0 +1,31 @@ +// Copyright 2023 Harness, Inc. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package common + +import ( + "net/url" +) + +func GenerateOciTokenURL(registryURL string) string { + return registryURL + "/v2/token" +} + +func GenerateSetupClientHostname(registryURL string) string { + regURL, err := url.Parse(registryURL) + if err != nil { + return "" + } + return regURL.Host +} diff --git a/registry/app/dist_temp/challenge/addr.go b/registry/app/dist_temp/challenge/addr.go new file mode 100644 index 000000000..cf9ada1bf --- /dev/null +++ b/registry/app/dist_temp/challenge/addr.go @@ -0,0 +1,43 @@ +// Source: https://github.com/distribution/distribution + +// Copyright 2014 https://github.com/distribution/distribution Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package challenge + +import ( + "net/url" + "strings" +) + +// FROM: https://golang.org/src/net/http/http.go +// Given a string of the form "host", "host:port", or "[ipv6::address]:port", +// return true if the string includes a port. 
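+//
+// For example (illustrative):
+//
+//	hasPort("example.com")     // false
+//	hasPort("example.com:443") // true
+//	hasPort("[::1]:5000")      // true
+//	hasPort("[2001:db8::1]")   // false: the colons sit inside the brackets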
+func hasPort(s string) bool { return strings.LastIndex(s, ":") > strings.LastIndex(s, "]") }
+
+// FROM: http://golang.org/src/net/http/transport.go
+var portMap = map[string]string{
+	"http":  "80",
+	"https": "443",
+}
+
+// canonicalAddr returns url.Host but always with a ":port" suffix
+// FROM: http://golang.org/src/net/http/transport.go
+func canonicalAddr(url *url.URL) string {
+	addr := url.Host
+	if !hasPort(addr) {
+		return addr + ":" + portMap[url.Scheme]
+	}
+	return addr
+}
diff --git a/registry/app/dist_temp/challenge/authchallenge.go b/registry/app/dist_temp/challenge/authchallenge.go
new file mode 100644
index 000000000..15be888bf
--- /dev/null
+++ b/registry/app/dist_temp/challenge/authchallenge.go
@@ -0,0 +1,253 @@
+// Source: https://github.com/distribution/distribution

+// Copyright 2014 https://github.com/distribution/distribution Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package challenge
+
+import (
+	"fmt"
+	"net/http"
+	"net/url"
+	"strings"
+	"sync"
+)
+
+// Octet types from RFC 2616.
+type octetType byte
+
+var octetTypes [256]octetType
+
+const (
+	isToken octetType = 1 << iota
+	isSpace
+)
+
+func init() {
+	// OCTET      = <any 8-bit sequence of data>
+	// CHAR       = <any US-ASCII character (octets 0 - 127)>
+	// CTL        = <any US-ASCII control character (octets 0 - 31) and DEL (127)>
+	// CR         = <US-ASCII CR, carriage return (13)>
+	// LF         = <US-ASCII LF, linefeed (10)>
+	// SP         = <US-ASCII SP, space (32)>
+	// HT         = <US-ASCII HT, horizontal-tab (9)>
+	// <">        = <US-ASCII double-quote mark (34)>
+	// CRLF       = CR LF
+	// LWS        = [CRLF] 1*( SP | HT )
+	// TEXT       = <any OCTET except CTLs, but including LWS>
+	// separators = "(" | ")" | "<" | ">" | "@" | "," | ";" | ":" | "\" | <">
+	//              | "/" | "[" | "]" | "?" | "=" | "{" | "}" | SP | HT
+	// token      = 1*<any CHAR except CTLs or separators>
+	// qdtext     = <any TEXT except <">>
+
+	for c := 0; c < 256; c++ {
+		var t octetType
+		isCtl := c <= 31 || c == 127
+		isChar := 0 <= c && c <= 127
+		isSeparator := strings.ContainsRune(" \t\"(),/:;<=>?@[]\\{}", rune(c))
+		if strings.ContainsRune(" \t\r\n", rune(c)) {
+			t |= isSpace
+		}
+		if isChar && !isCtl && !isSeparator {
+			t |= isToken
+		}
+		octetTypes[c] = t
+	}
+}
+
+// Challenge carries information from a WWW-Authenticate response header.
+// See RFC 2617.
+type Challenge struct {
+	// Scheme is the auth-scheme according to RFC 2617
+	Scheme string
+
+	// Parameters are the auth-params according to RFC 2617
+	Parameters map[string]string
+}
+
+// Manager manages the challenges for endpoints.
+// The challenges are pulled out of HTTP responses. Only
+// responses which expect challenges should be added to
+// the manager, since a non-unauthorized request will be
+// viewed as not requiring challenges.
+type Manager interface {
+	// GetChallenges returns the challenges for the given
+	// endpoint URL.
+	GetChallenges(endpoint url.URL) ([]Challenge, error)
+
+	// AddResponse adds the response to the challenge
+	// manager. The challenges will be parsed out of
+	// the WWW-Authenticate headers and added to the
+	// URL which produced the response. If the
+	// response was authorized, any challenges for the
+	// endpoint will be cleared.
+	AddResponse(resp *http.Response) error
+}
+
+// NewSimpleManager returns an instance of
+// Manager which only maps endpoints to challenges
+// based on the responses which have been added to the
+// manager.
The simple manager will make no attempt to +// perform requests on the endpoints or cache the responses +// to a backend. +func NewSimpleManager() Manager { + return &simpleManager{ + Challenges: make(map[string][]Challenge), + } +} + +type simpleManager struct { + sync.RWMutex + Challenges map[string][]Challenge +} + +func normalizeURL(endpoint *url.URL) { + endpoint.Host = strings.ToLower(endpoint.Host) + endpoint.Host = canonicalAddr(endpoint) +} + +func (m *simpleManager) GetChallenges(endpoint url.URL) ([]Challenge, error) { + normalizeURL(&endpoint) + + m.RLock() + defer m.RUnlock() + challenges := m.Challenges[endpoint.String()] + return challenges, nil +} + +func (m *simpleManager) AddResponse(resp *http.Response) error { + challenges := ResponseChallenges(resp) + if resp.Request == nil { + return fmt.Errorf("missing request reference") + } + urlCopy := url.URL{ + Path: resp.Request.URL.Path, + Host: resp.Request.URL.Host, + Scheme: resp.Request.URL.Scheme, + } + normalizeURL(&urlCopy) + + m.Lock() + defer m.Unlock() + m.Challenges[urlCopy.String()] = challenges + return nil +} + +// ResponseChallenges returns a list of authorization challenges +// for the given http Response. Challenges are only checked if +// the response status code was a 401. +func ResponseChallenges(resp *http.Response) []Challenge { + if resp.StatusCode == http.StatusUnauthorized { + // Parse the WWW-Authenticate Header and store the challenges + // on this endpoint object. + return parseAuthHeader(resp.Header) + } + + return nil +} + +func parseAuthHeader(header http.Header) []Challenge { + challenges := []Challenge{} + for _, h := range header[http.CanonicalHeaderKey("WWW-Authenticate")] { + v, p := parseValueAndParams(h) + if v != "" { + challenges = append(challenges, Challenge{Scheme: v, Parameters: p}) + } + } + return challenges +} + +func parseValueAndParams(header string) (value string, params map[string]string) { + params = make(map[string]string) + value, s := expectToken(header) + if value == "" { + return + } + value = strings.ToLower(value) + s = "," + skipSpace(s) + for strings.HasPrefix(s, ",") { + var pkey string + pkey, s = expectToken(skipSpace(s[1:])) + if pkey == "" { + return + } + if !strings.HasPrefix(s, "=") { + return + } + var pvalue string + pvalue, s = expectTokenOrQuoted(s[1:]) + if pvalue == "" { + return + } + pkey = strings.ToLower(pkey) + params[pkey] = pvalue + s = skipSpace(s) + } + return +} + +func skipSpace(s string) (rest string) { + i := 0 + for ; i < len(s); i++ { + if octetTypes[s[i]]&isSpace == 0 { + break + } + } + return s[i:] +} + +func expectToken(s string) (token, rest string) { + i := 0 + for ; i < len(s); i++ { + if octetTypes[s[i]]&isToken == 0 { + break + } + } + return s[:i], s[i:] +} + +func expectTokenOrQuoted(s string) (value string, rest string) { + if !strings.HasPrefix(s, "\"") { + return expectToken(s) + } + s = s[1:] + for i := 0; i < len(s); i++ { + switch s[i] { + case '"': + return s[:i], s[i+1:] + case '\\': + p := make([]byte, len(s)-1) + j := copy(p, s[:i]) + escape := true + for i++; i < len(s); i++ { + b := s[i] + switch { + case escape: + escape = false + p[j] = b + j++ + case b == '\\': + escape = true + case b == '"': + return string(p[:j]), s[i+1:] + default: + p[j] = b + j++ + } + } + return "", "" + } + } + return "", "" +} diff --git a/registry/app/dist_temp/challenge/authchallenge_test.go b/registry/app/dist_temp/challenge/authchallenge_test.go new file mode 100644 index 000000000..6de668142 --- /dev/null +++ 
b/registry/app/dist_temp/challenge/authchallenge_test.go @@ -0,0 +1,149 @@ +// Source: https://github.com/distribution/distribution + +// Copyright 2014 https://github.com/distribution/distribution Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package challenge + +import ( + "fmt" + "net/http" + "net/url" + "strings" + "sync" + "testing" +) + +func TestAuthChallengeParse(t *testing.T) { + header := http.Header{} + header.Add( + "WWW-Authenticate", + `Bearer realm="https://auth.example.com/token",service="registry.example.com",other=fun,slashed="he\"\l\lo"`, + ) + + challenges := parseAuthHeader(header) + if len(challenges) != 1 { + t.Fatalf("Unexpected number of auth challenges: %d, expected 1", len(challenges)) + } + challenge := challenges[0] + + if expected := "bearer"; challenge.Scheme != expected { + t.Fatalf("Unexpected scheme: %s, expected: %s", challenge.Scheme, expected) + } + + if expected := "https://auth.example.com/token"; challenge.Parameters["realm"] != expected { + t.Fatalf("Unexpected param: %s, expected: %s", challenge.Parameters["realm"], expected) + } + + if expected := "registry.example.com"; challenge.Parameters["service"] != expected { + t.Fatalf("Unexpected param: %s, expected: %s", challenge.Parameters["service"], expected) + } + + if expected := "fun"; challenge.Parameters["other"] != expected { + t.Fatalf("Unexpected param: %s, expected: %s", challenge.Parameters["other"], expected) + } + + if expected := "he\"llo"; challenge.Parameters["slashed"] != expected { + t.Fatalf("Unexpected param: %s, expected: %s", challenge.Parameters["slashed"], expected) + } +} + +func TestAuthChallengeNormalization(t *testing.T) { + testAuthChallengeNormalization(t, "reg.EXAMPLE.com") + testAuthChallengeNormalization(t, "bɿɒʜɔiɿ-ɿɘƚƨim-ƚol-ɒ-ƨʞnɒʜƚ.com") + testAuthChallengeNormalization(t, "reg.example.com:80") + testAuthChallengeConcurrent(t, "reg.EXAMPLE.com") +} + +func testAuthChallengeNormalization(t *testing.T, host string) { + scm := NewSimpleManager() + + url, err := url.Parse(fmt.Sprintf("http://%s/v2/", host)) + if err != nil { + t.Fatal(err) + } + + resp := &http.Response{ + Request: &http.Request{ + URL: url, + }, + Header: make(http.Header), + StatusCode: http.StatusUnauthorized, + } + resp.Header.Add( + "WWW-Authenticate", + fmt.Sprintf("Bearer realm=\"https://%s/token\",service=\"registry.example.com\"", host), + ) + + err = scm.AddResponse(resp) + if err != nil { + t.Fatal(err) + } + + lowered := *url + lowered.Host = strings.ToLower(lowered.Host) + lowered.Host = canonicalAddr(&lowered) + c, err := scm.GetChallenges(lowered) + if err != nil { + t.Fatal(err) + } + + if len(c) == 0 { + t.Fatal("Expected challenge for lower-cased-host URL") + } +} + +func testAuthChallengeConcurrent(t *testing.T, host string) { + scm := NewSimpleManager() + + url, err := url.Parse(fmt.Sprintf("http://%s/v2/", host)) + if err != nil { + t.Fatal(err) + } + + resp := &http.Response{ + Request: &http.Request{ + URL: url, + }, + Header: make(http.Header), + StatusCode: 
http.StatusUnauthorized, + } + resp.Header.Add( + "WWW-Authenticate", + fmt.Sprintf("Bearer realm=\"https://%s/token\",service=\"registry.example.com\"", host), + ) + var s sync.WaitGroup + s.Add(2) + go func() { + defer s.Done() + for i := 0; i < 200; i++ { + err = scm.AddResponse(resp) + if err != nil { + t.Error(err) + } + } + }() + go func() { + defer s.Done() + lowered := *url + lowered.Host = strings.ToLower(lowered.Host) + for k := 0; k < 200; k++ { + _, err := scm.GetChallenges(lowered) + if err != nil { + t.Error(err) + } + } + }() + s.Wait() +} diff --git a/registry/app/dist_temp/dcontext/context.go b/registry/app/dist_temp/dcontext/context.go new file mode 100644 index 000000000..7dac1af57 --- /dev/null +++ b/registry/app/dist_temp/dcontext/context.go @@ -0,0 +1,91 @@ +// Source: https://github.com/distribution/distribution + +// Copyright 2014 https://github.com/distribution/distribution Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package dcontext + +import ( + "context" + "sync" + + "github.com/google/uuid" +) + +// instanceContext is a context that provides only an instance id. It is +// provided as the main background context. +type instanceContext struct { + context.Context + id string // id of context, logged as "instance.id" + once sync.Once // once protect generation of the id +} + +func (ic *instanceContext) Value(key interface{}) interface{} { + if key == "instance.id" { + ic.once.Do( + func() { + // We want to lazy initialize the UUID such that we don't + // call a random generator from the package initialization + // code. For various reasons random could not be available + // https://github.com/distribution/distribution/issues/782 + ic.id = uuid.NewString() + }, + ) + return ic.id + } + + return ic.Context.Value(key) +} + +var background = &instanceContext{ + Context: context.Background(), +} + +// Background returns a non-nil, empty Context. The background context +// provides a single key, "instance.id" that is globally unique to the +// process. +func Background() context.Context { + return background +} + +// stringMapContext is a simple context implementation that checks a map for a +// key, falling back to a parent if not present. +type stringMapContext struct { + context.Context + m map[string]interface{} +} + +// WithValues returns a context that proxies lookups through a map. Only +// supports string keys. +func WithValues(ctx context.Context, m map[string]interface{}) context.Context { + mo := make(map[string]interface{}, len(m)) // make our own copy. 
+	for k, v := range m {
+		mo[k] = v
+	}
+
+	return stringMapContext{
+		Context: ctx,
+		m:       mo,
+	}
+}
+
+func (smc stringMapContext) Value(key interface{}) interface{} {
+	if ks, ok := key.(string); ok {
+		if v, ok1 := smc.m[ks]; ok1 {
+			return v
+		}
+	}
+
+	return smc.Context.Value(key)
+}
diff --git a/registry/app/dist_temp/dcontext/doc.go b/registry/app/dist_temp/dcontext/doc.go
new file mode 100644
index 000000000..be7e33da2
--- /dev/null
+++ b/registry/app/dist_temp/dcontext/doc.go
@@ -0,0 +1,17 @@
+// Source: https://github.com/distribution/distribution

+// Copyright 2014 https://github.com/distribution/distribution Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package dcontext
diff --git a/registry/app/dist_temp/dcontext/http.go b/registry/app/dist_temp/dcontext/http.go
new file mode 100644
index 000000000..d85337d52
--- /dev/null
+++ b/registry/app/dist_temp/dcontext/http.go
@@ -0,0 +1,257 @@
+// Source: https://github.com/distribution/distribution

+// Copyright 2014 https://github.com/distribution/distribution Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package dcontext
+
+import (
+	"context"
+	"errors"
+	"net/http"
+	"strings"
+	"sync"
+	"time"
+
+	"github.com/harness/gitness/registry/app/dist_temp/requestutil"
+
+	"github.com/google/uuid"
+	"github.com/gorilla/mux"
+	"github.com/rs/zerolog"
+)
+
+// Common errors used with this package.
+var (
+	ErrNoRequestContext        = errors.New("no http request in context")
+	ErrNoResponseWriterContext = errors.New("no http response in context")
+)
+
+// WithRequest places the request on the context. The context of the request
+// is assigned a unique id, available at "http.request.id". The request itself
+// is available at "http.request". Other common attributes are available under
+// the prefix "http.request.". If a request is already present on the context,
+// this method will panic.
+func WithRequest(ctx context.Context, r *http.Request) context.Context {
+	if ctx.Value("http.request") != nil {
+		panic("only one request per context")
+	}
+
+	return &httpRequestContext{
+		Context:   ctx,
+		startedAt: time.Now(),
+		id:        uuid.NewString(),
+		r:         r,
+	}
+}
+
+// GetRequestID attempts to resolve the current request id, if possible. An
+// empty string is returned if it is not available on the context.
+func GetRequestID(ctx context.Context) string { + return GetStringValue(ctx, "http.request.id") +} + +// WithResponseWriter returns a new context and response writer that makes +// interesting response statistics available within the context. +func WithResponseWriter(ctx context.Context, w http.ResponseWriter) (context.Context, http.ResponseWriter) { + irw := instrumentedResponseWriter{ + ResponseWriter: w, + Context: ctx, + } + return &irw, &irw +} + +// GetResponseWriter returns the http.ResponseWriter from the provided +// context. If not present, ErrNoResponseWriterContext is returned. The +// returned instance provides instrumentation in the context. +func GetResponseWriter(ctx context.Context) (http.ResponseWriter, error) { + v := ctx.Value("http.response") + + rw, ok := v.(http.ResponseWriter) + if !ok || rw == nil { + return nil, ErrNoResponseWriterContext + } + + return rw, nil +} + +// getVarsFromRequest lets us change the request vars implementation for testing +// and maybe future changes. +var getVarsFromRequest = mux.Vars + +// WithVars extracts gorilla/mux vars and makes them available on the returned +// context. Variables are available at keys with the prefix "vars.". For +// example, if looking for the variable "name", it can be accessed as +// "vars.name". Implementations that are accessing values need not know that +// the underlying context is implemented with gorilla/mux vars. +func WithVars(ctx context.Context, r *http.Request) context.Context { + return &muxVarsContext{ + Context: ctx, + vars: getVarsFromRequest(r), + } +} + +// GetResponseLogger reads the current response stats and builds a logger. +// Because the values are read at call time, pushing a logger returned from +// this function on the context will lead to missing or invalid data. Only +// call this at the end of a request, after the response has been written. +func GetResponseLogger(ctx context.Context, l *zerolog.Event) Logger { + logger := getZerologLogger( + ctx, l, + "http.response.written", + "http.response.status", + "http.response.contenttype", + ) + + duration := Since(ctx, "http.request.startedat") + + if duration > 0 { + logger = logger.Str("http.response.duration", duration.String()) + } + + return logger +} + +// httpRequestContext makes information about a request available to context. +type httpRequestContext struct { + context.Context + + startedAt time.Time + id string + r *http.Request +} + +// Value returns a keyed element of the request for use in the context. To get +// the request itself, query "http.request". For other components, access them as +// "http.request.<component>". For example, "http.request.uri" yields r.RequestURI.
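+// +// For instance (illustrative): ctx.Value("http.request.method") yields r.Method, +// and ctx.Value("http.request.id") yields the id assigned by WithRequest.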
+func (ctx *httpRequestContext) Value(key interface{}) interface{} { + if keyStr, ok := key.(string); ok { + switch keyStr { + case "http.request": + return ctx.r + case "http.request.uri": + return ctx.r.RequestURI + case "http.request.remoteaddr": + return requestutil.RemoteAddr(ctx.r) + case "http.request.method": + return ctx.r.Method + case "http.request.host": + return ctx.r.Host + case "http.request.referer": + referer := ctx.r.Referer() + if referer != "" { + return referer + } + case "http.request.useragent": + return ctx.r.UserAgent() + case "http.request.id": + return ctx.id + case "http.request.startedat": + return ctx.startedAt + case "http.request.contenttype": + if ct := ctx.r.Header.Get("Content-Type"); ct != "" { + return ct + } + default: + // no match; fall back to standard behavior below + } + } + + return ctx.Context.Value(key) +} + +type muxVarsContext struct { + context.Context + vars map[string]string +} + +func (ctx *muxVarsContext) Value(key interface{}) interface{} { + if keyStr, ok := key.(string); ok { + if keyStr == "vars" { + return ctx.vars + } + // TrimPrefix is a no-op when the "vars." prefix is absent, so a bare + // variable name that matches a var also resolves here. + if v, ok1 := ctx.vars[strings.TrimPrefix(keyStr, "vars.")]; ok1 { + return v + } + } + + return ctx.Context.Value(key) +} + +// instrumentedResponseWriter provides response writer information in a +// context. +type instrumentedResponseWriter struct { + http.ResponseWriter + context.Context + + mu sync.Mutex + status int + written int64 +} + +func (irw *instrumentedResponseWriter) Write(p []byte) (n int, err error) { + n, err = irw.ResponseWriter.Write(p) + + irw.mu.Lock() + irw.written += int64(n) + + // Guess the likely status if not set. + if irw.status == 0 { + irw.status = http.StatusOK + } + + irw.mu.Unlock() + + return +} + +func (irw *instrumentedResponseWriter) WriteHeader(status int) { + irw.ResponseWriter.WriteHeader(status) + + irw.mu.Lock() + irw.status = status + irw.mu.Unlock() +} + +func (irw *instrumentedResponseWriter) Flush() { + if flusher, ok := irw.ResponseWriter.(http.Flusher); ok { + flusher.Flush() + } +} + +func (irw *instrumentedResponseWriter) Value(key interface{}) interface{} { + if keyStr, ok := key.(string); ok { + switch keyStr { + case "http.response": + return irw + case "http.response.written": + irw.mu.Lock() + defer irw.mu.Unlock() + return irw.written + case "http.response.status": + irw.mu.Lock() + defer irw.mu.Unlock() + return irw.status + case "http.response.contenttype": + if ct := irw.Header().Get("Content-Type"); ct != "" { + return ct + } + default: + // no match; fall back to standard behavior below + } + } + + return irw.Context.Value(key) +} diff --git a/registry/app/dist_temp/dcontext/http_test.go b/registry/app/dist_temp/dcontext/http_test.go new file mode 100644 index 000000000..63fa33ec8 --- /dev/null +++ b/registry/app/dist_temp/dcontext/http_test.go @@ -0,0 +1,241 @@ +// Source: https://github.com/distribution/distribution + +// Copyright 2014 https://github.com/distribution/distribution Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package dcontext + +import ( + "net/http" + "reflect" + "testing" + "time" +) + +func TestWithRequest(t *testing.T) { + var req http.Request + + start := time.Now() + req.Method = http.MethodGet + req.Host = "example.com" + req.RequestURI = "/test-test" + req.Header = make(http.Header) + req.Header.Set("Referer", "foo.com/referer") + req.Header.Set("User-Agent", "test/0.1") + + ctx := WithRequest(Background(), &req) + for _, tc := range []struct { + key string + expected interface{} + }{ + { + key: "http.request", + expected: &req, + }, + { + key: "http.request.id", + }, + { + key: "http.request.method", + expected: req.Method, + }, + { + key: "http.request.host", + expected: req.Host, + }, + { + key: "http.request.uri", + expected: req.RequestURI, + }, + { + key: "http.request.referer", + expected: req.Referer(), + }, + { + key: "http.request.useragent", + expected: req.UserAgent(), + }, + { + key: "http.request.remoteaddr", + expected: req.RemoteAddr, + }, + { + key: "http.request.startedat", + }, + } { + v := ctx.Value(tc.key) + + if v == nil { + t.Fatalf("value not found for %q", tc.key) + } + + if tc.expected != nil && v != tc.expected { + t.Fatalf("%s: %v != %v", tc.key, v, tc.expected) + } + + // Key specific checks! + switch tc.key { + case "http.request.id": + if _, ok := v.(string); !ok { + t.Fatalf("request id not a string: %v", v) + } + case "http.request.startedat": + vt, ok := v.(time.Time) + if !ok { + t.Fatalf("value not a time: %v", v) + } + + now := time.Now() + if vt.After(now) { + t.Fatalf("time generated too late: %v > %v", vt, now) + } + + if vt.Before(start) { + t.Fatalf("time generated too early: %v < %v", vt, start) + } + } + } +} + +type testResponseWriter struct { + flushed bool + status int + written int64 + header http.Header +} + +func (trw *testResponseWriter) Header() http.Header { + if trw.header == nil { + trw.header = make(http.Header) + } + + return trw.header +} + +func (trw *testResponseWriter) Write(p []byte) (n int, err error) { + if trw.status == 0 { + trw.status = http.StatusOK + } + + n = len(p) + trw.written += int64(n) + return +} + +func (trw *testResponseWriter) WriteHeader(status int) { + trw.status = status +} + +func (trw *testResponseWriter) Flush() { + trw.flushed = true +} + +func TestWithResponseWriter(t *testing.T) { + trw := testResponseWriter{} + ctx, rw := WithResponseWriter(Background(), &trw) + + if ctx.Value("http.response") != rw { + t.Fatalf("response not available in context: %v != %v", ctx.Value("http.response"), rw) + } + + grw, err := GetResponseWriter(ctx) + if err != nil { + t.Fatalf("error getting response writer: %v", err) + } + + if grw != rw { + t.Fatalf("unexpected response writer returned: %#v != %#v", grw, rw) + } + + if ctx.Value("http.response.status") != 0 { + t.Fatalf( + "response status should always be a number and should be zero here: %v != 0", + ctx.Value("http.response.status"), + ) + } + + if n, err := rw.Write(make([]byte, 1024)); err != nil { + t.Fatalf("unexpected error writing: %v", err) + } else if n != 1024 { + t.Fatalf("unexpected number of bytes written: %v != %v", n, 1024) + } 
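+	// After a successful write, the instrumented writer should expose both the status and the byte count through the context.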
+ + if ctx.Value("http.response.status") != http.StatusOK { + t.Fatalf("unexpected response status in context: %v != %v", ctx.Value("http.response.status"), http.StatusOK) + } + + if ctx.Value("http.response.written") != int64(1024) { + t.Fatalf("unexpected number of reported bytes written: %v != %v", ctx.Value("http.response.written"), 1024) + } + + // Make sure flush propagates + rw.(http.Flusher).Flush() + + if !trw.flushed { + t.Fatalf("response writer not flushed") + } + + // Write another status and make sure context is correct. This normally + // wouldn't work except in this contrived test case. + rw.WriteHeader(http.StatusBadRequest) + + if ctx.Value("http.response.status") != http.StatusBadRequest { + t.Fatalf( + "unexpected response status in context: %v != %v", + ctx.Value("http.response.status"), + http.StatusBadRequest, + ) + } +} + +func TestWithVars(t *testing.T) { + var req http.Request + vars := map[string]string{ + "foo": "asdf", + "bar": "qwer", + } + + getVarsFromRequest = func(r *http.Request) map[string]string { + if r != &req { + t.Fatalf("unexpected request: %v != %v", r, req) + } + + return vars + } + + ctx := WithVars(Background(), &req) + for _, tc := range []struct { + key string + expected interface{} + }{ + { + key: "vars", + expected: vars, + }, + { + key: "vars.foo", + expected: "asdf", + }, + { + key: "vars.bar", + expected: "qwer", + }, + } { + v := ctx.Value(tc.key) + + if !reflect.DeepEqual(v, tc.expected) { + t.Fatalf("%q: %v != %v", tc.key, v, tc.expected) + } + } +} diff --git a/registry/app/dist_temp/dcontext/logger.go b/registry/app/dist_temp/dcontext/logger.go new file mode 100644 index 000000000..877f764cb --- /dev/null +++ b/registry/app/dist_temp/dcontext/logger.go @@ -0,0 +1,98 @@ +// Source: https://github.com/distribution/distribution + +// Copyright 2014 https://github.com/distribution/distribution Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package dcontext + +import ( + "context" + "fmt" + "runtime" + + "github.com/rs/zerolog" +) + +// Logger provides a leveled-logging interface. +type Logger interface { + Msgf(format string, v ...interface{}) + + Msg(msg string) +} + +type loggerKey struct{} + +// WithLogger creates a new context with the provided logger. +func WithLogger(ctx context.Context, logger Logger) context.Context { + return context.WithValue(ctx, loggerKey{}, logger) +} + +// GetLoggerWithFields returns a logger instance with the specified fields +// without affecting the context. Extra specified keys will be resolved from +// the context. +func GetLoggerWithFields( + ctx context.Context, log *zerolog.Event, + fields map[interface{}]interface{}, keys ...interface{}, +) Logger { + logger := getZerologLogger(ctx, log, keys...) + for key, value := range fields { + logger.Interface(fmt.Sprint(key), value) + } + + return logger +} + +// GetLogger returns the logger from the current context, if present. If one +// or more keys are provided, they will be resolved on the context and +// included in the logger.
While context.Value takes an interface, any key +// argument passed to GetLogger will be passed to fmt.Sprint when expanded as +// a logging key field. If context keys are integer constants, for example, +// it's recommended that a String method be implemented. +func GetLogger(ctx context.Context, l *zerolog.Event, keys ...interface{}) Logger { + return getZerologLogger(ctx, l, keys...) +} + +// getZerologLogger returns the zerolog logger for the context. If one or more keys +// are provided, they will be resolved on the context and included in the +// logger. Only use this function if specific zerolog functionality is +// required. +func getZerologLogger(ctx context.Context, l *zerolog.Event, keys ...interface{}) *zerolog.Event { + var logger *zerolog.Event + + // Get a logger, if it is present. + loggerInterface := ctx.Value(loggerKey{}) + if loggerInterface != nil { + if lgr, ok := loggerInterface.(*zerolog.Event); ok { + logger = lgr + } + } + + if logger == nil { + logger = l.Str("go.version", runtime.Version()) + // Fill in the instance id, if we have it. + instanceID := ctx.Value("instance.id") + if instanceID != nil { + logger.Interface("instance.id", instanceID) + } + } + + for _, key := range keys { + v := ctx.Value(key) + if v != nil { + logger.Interface(fmt.Sprint(key), v) + } + } + + return logger +} diff --git a/registry/app/dist_temp/dcontext/trace.go b/registry/app/dist_temp/dcontext/trace.go new file mode 100644 index 000000000..8bd18144e --- /dev/null +++ b/registry/app/dist_temp/dcontext/trace.go @@ -0,0 +1,126 @@ +// Source: https://github.com/distribution/distribution + +// Copyright 2014 https://github.com/distribution/distribution Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package dcontext + +import ( + "context" + "runtime" + "time" + + "github.com/google/uuid" + "github.com/rs/zerolog/log" +) + +// WithTrace allocates a traced timing span in a new context. This allows a +// caller to track the time between calling WithTrace and the returned done +// function. When the done function is called, a log message is emitted with a +// "trace.duration" field, corresponding to the elapsed time, and a +// "trace.func" field, corresponding to the function that called WithTrace. +// +// The logging keys "trace.id" and "trace.parent.id" are provided to implement +// dapper-like tracing. This function should be complemented with a WithSpan +// method that could be used for tracing distributed RPC calls. +// +// The main benefit of this function is to post-process log messages or +// intercept them in a hook to provide timing data. Trace ids and parent ids +// can also be linked to provide call tracing, if so required. +// +// Here is an example of the usage: +// +// func timedOperation(ctx Context) { +// ctx, done := WithTrace(ctx) +// defer done("this will be the log message") +// // ... function body ...
+// } +// +// If the function ran for roughly 1s, such a usage would emit a log message +// as follows: +// +// INFO[0001] this will be the log message trace.duration=1.004575763s +// +// trace.func=github.com/distribution/distribution/context.traceOperation trace.id= ... +// +// Notice that the function name is automatically resolved, along with the +// package and a trace id is emitted that can be linked with parent ids. +func WithTrace(ctx context.Context) (context.Context, func(format string, a ...interface{})) { + if ctx == nil { + ctx = Background() + } + + pc, file, line, _ := runtime.Caller(1) + f := runtime.FuncForPC(pc) + ctx = &traced{ + Context: ctx, + id: uuid.NewString(), + start: time.Now(), + parent: GetStringValue(ctx, "trace.id"), + fnname: f.Name(), + file: file, + line: line, + } + + return ctx, func(format string, a ...interface{}) { + GetLogger( + ctx, log.Info(), + "trace.duration", + "trace.id", + "trace.parent.id", + "trace.func", + "trace.file", + "trace.line", + ). + Msgf(format, a...) + } +} + +// traced represents a context that is traced for function call timing. It +// also provides fast lookup for the various attributes that are available on +// the trace. +type traced struct { + context.Context + id string + parent string + start time.Time + fnname string + file string + line int +} + +func (ts *traced) Value(key interface{}) interface{} { + switch key { + case "trace.start": + return ts.start + case "trace.duration": + return time.Since(ts.start) + case "trace.id": + return ts.id + case "trace.parent.id": + if ts.parent == "" { + return nil // must return nil to signal no parent. + } + + return ts.parent + case "trace.func": + return ts.fnname + case "trace.file": + return ts.file + case "trace.line": + return ts.line + } + + return ts.Context.Value(key) +} diff --git a/registry/app/dist_temp/dcontext/trace_test.go b/registry/app/dist_temp/dcontext/trace_test.go new file mode 100644 index 000000000..876161ffe --- /dev/null +++ b/registry/app/dist_temp/dcontext/trace_test.go @@ -0,0 +1,129 @@ +// Source: https://github.com/distribution/distribution + +// Copyright 2014 https://github.com/distribution/distribution Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package dcontext + +import ( + "runtime" + "testing" + "time" +) + +// TestWithTrace ensures that tracing has the expected values in the context. +func TestWithTrace(t *testing.T) { + t.Parallel() + pc, file, _, _ := runtime.Caller(0) // get current caller. 
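+	// (Caller(0) captures this test's program counter and file so the trace values can be checked below.)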
+ f := runtime.FuncForPC(pc) + + base := []valueTestCase{ + { + key: "trace.id", + notnilorempty: true, + }, + + { + key: "trace.file", + expected: file, + notnilorempty: true, + }, + { + key: "trace.line", + notnilorempty: true, + }, + { + key: "trace.start", + notnilorempty: true, + }, + } + + ctx, done := WithTrace(Background()) + t.Cleanup(func() { done("this will be emitted at end of test") }) + + tests := base + tests = append( + tests, valueTestCase{ + key: "trace.func", + expected: f.Name(), + }, + ) + for _, tc := range tests { + tc := tc + t.Run( + tc.key, func(t *testing.T) { + t.Parallel() + v := ctx.Value(tc.key) + if tc.notnilorempty { + if v == nil || v == "" { + t.Fatalf("value was nil or empty: %#v", v) + } + return + } + + if v != tc.expected { + t.Fatalf("unexpected value: %v != %v", v, tc.expected) + } + }, + ) + } + + tracedFn := func() { + parentID := ctx.Value("trace.id") // ensure the parent trace id is correct. + + pc1, _, _, _ := runtime.Caller(0) // get current caller. + f1 := runtime.FuncForPC(pc1) + ctx, done1 := WithTrace(ctx) + defer done1("this should be subordinate to the other trace") + time.Sleep(time.Second) + tests1 := base + tests1 = append( + tests1, valueTestCase{ + key: "trace.func", + expected: f1.Name(), + }, valueTestCase{ + key: "trace.parent.id", + expected: parentID, + }, + ) + for _, tc := range tests1 { + tc := tc + t.Run( + tc.key, func(t *testing.T) { + t.Parallel() + v := ctx.Value(tc.key) + if tc.notnilorempty { + if v == nil || v == "" { + t.Fatalf("value was nil or empty: %#v", v) + } + return + } + + if v != tc.expected { + t.Fatalf("unexpected value: %v != %v", v, tc.expected) + } + }, + ) + } + } + tracedFn() + + time.Sleep(time.Second) +} + +type valueTestCase struct { + key string + expected interface{} + notnilorempty bool // just check not empty/not nil +} diff --git a/registry/app/dist_temp/dcontext/util.go b/registry/app/dist_temp/dcontext/util.go new file mode 100644 index 000000000..4bbcb41db --- /dev/null +++ b/registry/app/dist_temp/dcontext/util.go @@ -0,0 +1,41 @@ +// Source: https://github.com/distribution/distribution + +// Copyright 2014 https://github.com/distribution/distribution Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package dcontext + +import ( + "context" + "time" +) + +// Since looks up key, which should be a time.Time, and returns the duration +// since that time. If the key is not found, the value returned will be zero. +// This is helpful when inferring metrics related to context execution times. +func Since(ctx context.Context, key interface{}) time.Duration { + if startedAt, ok := ctx.Value(key).(time.Time); ok { + return time.Since(startedAt) + } + return 0 +} + +// GetStringValue returns a string value from the context. The empty string +// will be returned if not found. 
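+// +// For example (illustrative): GetStringValue(ctx, "http.request.id") yields the +// id assigned by WithRequest, or "" when no request is on the context.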
+func GetStringValue(ctx context.Context, key interface{}) (value string) { + if valuev, ok := ctx.Value(key).(string); ok { + value = valuev + } + return value +} diff --git a/registry/app/dist_temp/dcontext/version.go b/registry/app/dist_temp/dcontext/version.go new file mode 100644 index 000000000..133035ced --- /dev/null +++ b/registry/app/dist_temp/dcontext/version.go @@ -0,0 +1,42 @@ +// Source: https://github.com/distribution/distribution + +// Copyright 2014 https://github.com/distribution/distribution Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package dcontext + +import ( + "context" + + "github.com/rs/zerolog/log" +) + +type versionKey struct{} + +func (versionKey) String() string { return "version" } + +// WithVersion stores the application version in the context. The new context +// gets a logger to ensure log messages are marked with the application +// version. +func WithVersion(ctx context.Context, version string) context.Context { + ctx = context.WithValue(ctx, versionKey{}, version) + // push a new logger onto the stack + return WithLogger(ctx, GetLogger(ctx, log.Info(), versionKey{})) +} + +// GetVersion returns the application version from the context. An empty +// string may be returned if the version was not set on the context. +func GetVersion(ctx context.Context) string { + return GetStringValue(ctx, versionKey{}) +} diff --git a/registry/app/dist_temp/dcontext/version_test.go b/registry/app/dist_temp/dcontext/version_test.go new file mode 100644 index 000000000..943c1e039 --- /dev/null +++ b/registry/app/dist_temp/dcontext/version_test.go @@ -0,0 +1,35 @@ +// Source: https://github.com/distribution/distribution + +// Copyright 2014 https://github.com/distribution/distribution Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License.
+ +package dcontext + +import "testing" + +func TestVersionContext(t *testing.T) { + ctx := Background() + + if GetVersion(ctx) != "" { + t.Fatalf("context should not yet have a version") + } + + expected := "2.1-whatever" + ctx = WithVersion(ctx, expected) + version := GetVersion(ctx) + + if version != expected { + t.Fatalf("version was not set: %q != %q", version, expected) + } +} diff --git a/registry/app/dist_temp/errcode/errors.go b/registry/app/dist_temp/errcode/errors.go new file mode 100644 index 000000000..8d88dac6c --- /dev/null +++ b/registry/app/dist_temp/errcode/errors.go @@ -0,0 +1,289 @@ +// Source: https://github.com/distribution/distribution + +// Copyright 2014 https://github.com/distribution/distribution Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package errcode + +import ( + "encoding/json" + "fmt" + "strings" +) + +// ErrorCoder is the base interface for ErrorCode and Error, allowing +// users of each to just call ErrorCode to get the real ID of each. +type ErrorCoder interface { + ErrorCode() CodeError +} + +// CodeError represents the error type. The errors are serialized via strings +// and the integer format may change and should *never* be exported. +type CodeError int + +var _ error = CodeError(0) + +// ErrorCode just returns itself. +func (ec CodeError) ErrorCode() CodeError { + return ec +} + +// Error returns the ID/Value. +func (ec CodeError) Error() string { + return strings.ToLower(strings.ReplaceAll(ec.String(), "_", " ")) +} + +// Descriptor returns the descriptor for the error code. +func (ec CodeError) Descriptor() ErrorDescriptor { + d, ok := errorCodeToDescriptors[ec] + + if !ok { + return ErrCodeUnknown.Descriptor() + } + + return d +} + +// String returns the canonical identifier for this error code. +func (ec CodeError) String() string { + return ec.Descriptor().Value +} + +// Message returns the human-readable error message for this error code. +func (ec CodeError) Message() string { + return ec.Descriptor().Message +} + +// MarshalText encodes the receiver into UTF-8-encoded text and returns the +// result. +func (ec CodeError) MarshalText() (text []byte, err error) { + return []byte(ec.String()), nil +} + +// UnmarshalText decodes the form generated by MarshalText. +func (ec *CodeError) UnmarshalText(text []byte) error { + desc, ok := idToDescriptors[string(text)] + + if !ok { + desc = ErrCodeUnknown.Descriptor() + } + + *ec = desc.Code + + return nil +} + +// WithMessage creates a new Error struct based on the passed-in info and +// overrides the Message property. +func (ec CodeError) WithMessage(message string) Error { + return Error{ + Code: ec, + Message: message, + } +} + +// WithDetail creates a new Error struct based on the passed-in info and +// sets the Detail property appropriately. +func (ec CodeError) WithDetail(detail interface{}) Error { + return Error{ + Code: ec, + Message: ec.Message(), + }.WithDetail(detail) +} + +// WithArgs creates a new Error struct and sets the Args slice.
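+// +// Illustrative sketch, assuming a hypothetical registered code whose Message is "tag %q unknown": +// +// err := ErrCodeTagExample.WithArgs("v1.0") // Message becomes: tag "v1.0" unknown +// +// The args are substituted into the descriptor's Message via fmt.Sprintf (see the +// Error.WithArgs method below).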
+func (ec CodeError) WithArgs(args ...interface{}) Error { + return Error{ + Code: ec, + Message: ec.Message(), + }.WithArgs(args...) +} + +// Error provides a wrapper around ErrorCode with extra Details provided. +type Error struct { + Code CodeError `json:"code"` + Message string `json:"message"` + Detail interface{} `json:"detail,omitempty"` +} + +var _ error = Error{} + +// ErrorCode returns the ID/Value of this Error. +func (e Error) ErrorCode() CodeError { + return e.Code +} + +// Error returns a human-readable representation of the error. +func (e Error) Error() string { + return fmt.Sprintf("%s: %s", e.Code.Error(), e.Message) +} + +// WithDetail will return a new Error, based on the current one, but with +// some Detail info added. +func (e Error) WithDetail(detail interface{}) Error { + return Error{ + Code: e.Code, + Message: e.Message, + Detail: detail, + } +} + +// WithArgs uses the passed-in list of interface{} as the substitution +// variables in the Error's Message string, but returns a new Error. +func (e Error) WithArgs(args ...interface{}) Error { + return Error{ + Code: e.Code, + Message: fmt.Sprintf(e.Code.Message(), args...), + Detail: e.Detail, + } +} + +// ErrorDescriptor provides relevant information about a given error code. +type ErrorDescriptor struct { + // Code is the error code that this descriptor describes. + Code CodeError + + // Value provides a unique, string key, often capitalized with + // underscores, to identify the error code. This value is used as the + // keyed value when serializing API errors. + Value string + + // Message is a short, human-readable description of the error condition + // included in API responses. + Message string + + // Description provides a complete account of the error's purpose, suitable + // for use in documentation. + Description string + + // HTTPStatusCode provides the http status code that is associated with + // this error condition. + HTTPStatusCode int +} + +// ParseErrorCode returns the error code for the given string value. +// `ErrCodeUnknown` will be returned if the error is not known. +func ParseErrorCode(value string) CodeError { + ed, ok := idToDescriptors[value] + if ok { + return ed.Code + } + + return ErrCodeUnknown +} + +// Errors provides the envelope for multiple errors and a few sugar methods +// for use within the application. +type Errors []error + +var _ error = Errors{} + +func (errs Errors) Error() string { + switch len(errs) { + case 0: + return "" + case 1: + return errs[0].Error() + default: + msg := "errors:\n" + for _, err := range errs { + msg += err.Error() + "\n" + } + return msg + } +} + +// Len returns the current number of errors. +func (errs Errors) Len() int { + return len(errs) +} + +// MarshalJSON converts slice of error, ErrorCode or Error into a +// slice of Error - then serializes.
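+// +// The resulting envelope looks like (illustrative, using a registered code): +// +// {"errors":[{"code":"NAME_UNKNOWN","message":"repository name not known to registry"}]}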
+func (errs Errors) MarshalJSON() ([]byte, error) { + var tmpErrs struct { + Errors []Error `json:"errors,omitempty"` + } + + for _, daErr := range errs { + var err Error + + switch daErr := daErr.(type) { + case CodeError: + err = daErr.WithDetail(nil) + case Error: + err = daErr + default: + err = ErrCodeUnknown.WithDetail(daErr) + } + + // If the Error struct was set up but the Message field was left + // empty (i.e. ""), grab it from the error code + msg := err.Message + if msg == "" { + msg = err.Code.Message() + } + + tmpErr := Error{ + Code: err.Code, + Message: msg, + Detail: err.Detail, + } + + // if the detail contains an error, extract the error message; + // otherwise json.Marshal will not serialize it at all + // https://github.com/golang/go/issues/10748 + if detail, ok := tmpErr.Detail.(error); ok { + tmpErr.Detail = detail.Error() + } + + tmpErrs.Errors = append(tmpErrs.Errors, tmpErr) + } + + return json.Marshal(tmpErrs) +} + +// UnmarshalJSON deserializes []Error and then converts it into slice of +// Error or ErrorCode. +func (errs *Errors) UnmarshalJSON(data []byte) error { + var tmpErrs struct { + Errors []Error + } + + if err := json.Unmarshal(data, &tmpErrs); err != nil { + return err + } + + var newErrs Errors + for _, daErr := range tmpErrs.Errors { + // If Message is empty or exactly matches the Code's message string + // then just use the Code, no need for a full Error struct + if daErr.Detail == nil && (daErr.Message == "" || daErr.Message == daErr.Code.Message()) { + // Errors without details are converted to a bare CodeError + newErrs = append(newErrs, daErr.Code) + } else { + // Errors with details are kept as-is + newErrs = append( + newErrs, Error{ + Code: daErr.Code, + Message: daErr.Message, + Detail: daErr.Detail, + }, + ) + } + } + + *errs = newErrs + return nil +} diff --git a/registry/app/dist_temp/errcode/handler.go b/registry/app/dist_temp/errcode/handler.go new file mode 100644 index 000000000..49efc5107 --- /dev/null +++ b/registry/app/dist_temp/errcode/handler.go @@ -0,0 +1,56 @@ +// Source: https://github.com/distribution/distribution + +// Copyright 2014 https://github.com/distribution/distribution Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package errcode + +import ( + "encoding/json" + "net/http" +) + +// ServeJSON attempts to serve the errcode in a JSON envelope. It marshals err +// and sets the content-type header to 'application/json'. It will handle +// ErrorCoder and Errors, and if necessary will create an envelope. +func ServeJSON(w http.ResponseWriter, err error) error { + w.Header().Set("Content-Type", "application/json") + var sc int + + switch errs := err.(type) { + case Errors: + if len(errs) < 1 { + break + } + + if err, ok := errs[0].(ErrorCoder); ok { + sc = err.ErrorCode().Descriptor().HTTPStatusCode + } + case ErrorCoder: + sc = errs.ErrorCode().Descriptor().HTTPStatusCode + err = Errors{err} // create an envelope.
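+		// (wrapping single errors keeps the response shape consistent: clients always receive an "errors" array)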
+ default: + // We just have an unhandled error type, so just place in an envelope + // and move along. + err = Errors{err} + } + + if sc == 0 { + sc = http.StatusInternalServerError + } + + w.WriteHeader(sc) + + return json.NewEncoder(w).Encode(err) +} diff --git a/registry/app/dist_temp/errcode/register.go b/registry/app/dist_temp/errcode/register.go new file mode 100644 index 000000000..15e188d93 --- /dev/null +++ b/registry/app/dist_temp/errcode/register.go @@ -0,0 +1,509 @@ +// Source: https://github.com/distribution/distribution + +// Copyright 2014 https://github.com/distribution/distribution Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package errcode + +import ( + "context" + "errors" + "fmt" + "net" + "net/http" + "os" + "sort" + "sync" + "syscall" + + storagedriver "github.com/harness/gitness/registry/app/driver" + + "github.com/jackc/pgerrcode" + "github.com/jackc/pgx/v5/pgconn" + "google.golang.org/api/googleapi" +) + +var ( + errorCodeToDescriptors = map[CodeError]ErrorDescriptor{} + idToDescriptors = map[string]ErrorDescriptor{} + groupToDescriptors = map[string][]ErrorDescriptor{} +) + +var ( + // ErrCodeUnknown is a generic error that can be used as a last + // resort if there is no situation-specific error message that can be used. + ErrCodeUnknown = register( + "errcode", ErrorDescriptor{ + Value: "UNKNOWN", + Message: "unknown error", + Description: `Generic error returned when the error does not have an + API classification.`, + HTTPStatusCode: http.StatusInternalServerError, + }, + ) + + // ErrCodeUnsupported is returned when an operation is not supported. + ErrCodeUnsupported = register( + "errcode", ErrorDescriptor{ + Value: "UNSUPPORTED", + Message: "The operation is unsupported.", + Description: `The operation was unsupported due to a missing + implementation or invalid set of parameters.`, + HTTPStatusCode: http.StatusMethodNotAllowed, + }, + ) + + // ErrCodeUnauthorized is returned if a request requires + // authentication. + ErrCodeUnauthorized = register( + "errcode", ErrorDescriptor{ + Value: "UNAUTHORIZED", + Message: "authentication required", + Description: `The access controller was unable to authenticate + the client. Often this will be accompanied by a + Www-Authenticate HTTP response header indicating how to + authenticate.`, + HTTPStatusCode: http.StatusUnauthorized, + }, + ) + + // ErrCodeDenied is returned if a client does not have sufficient + // permission to perform an action. + ErrCodeDenied = register( + "errcode", ErrorDescriptor{ + Value: "DENIED", + Message: "requested access to the resource is denied", + Description: `The access controller denied access for the + operation on a resource.`, + HTTPStatusCode: http.StatusForbidden, + }, + ) + + // ErrCodeUnavailable provides a common error to report unavailability + // of a service or endpoint. 
+ ErrCodeUnavailable = register( + "errcode", ErrorDescriptor{ + Value: "UNAVAILABLE", + Message: "service unavailable", + Description: "Returned when a service is not available", + HTTPStatusCode: http.StatusServiceUnavailable, + }, + ) + + // ErrCodeTooManyRequests is returned if a client attempts too many + // times to contact a service endpoint. + ErrCodeTooManyRequests = register( + "errcode", ErrorDescriptor{ + Value: "TOOMANYREQUESTS", + Message: "too many requests", + Description: `Returned when a client attempts to contact a + service too many times`, + HTTPStatusCode: http.StatusTooManyRequests, + }, + ) + + // ErrCodeConnectionReset provides an error to report a client dropping the + // connection. + ErrCodeConnectionReset = register( + "errcode", ErrorDescriptor{ + Value: "CONNECTIONRESET", + Message: "connection reset by peer", + Description: "Returned when the client closes the connection unexpectedly", + // 400 is the most fitting error code in the HTTP spec, 499 is used by + // nginx (and within this project as well), and is specific to this scenario, + // but it is preferable to stay within the spec. + HTTPStatusCode: http.StatusBadRequest, + }, + ) + + // ErrCodeRequestCanceled provides an error to report a canceled request. This is usually due to a + // context.Canceled error. + ErrCodeRequestCanceled = register( + "errcode", ErrorDescriptor{ + Value: "REQUESTCANCELED", + Message: "request canceled", + Description: "Returned when the client cancels the request", + HTTPStatusCode: http.StatusBadRequest, + }, + ) +) + +const errGroup = "registry.api.v2" + +var ( + // ErrCodeDigestInvalid is returned when uploading a blob if the + // provided digest does not match the blob contents. + ErrCodeDigestInvalid = register( + errGroup, ErrorDescriptor{ + Value: "DIGEST_INVALID", + Message: "provided digest did not match uploaded content", + Description: `When a blob is uploaded, the registry will check that + the content matches the digest provided by the client. The error may + include a detail structure with the key "digest", including the + invalid digest string. This error may also be returned when a manifest + includes an invalid layer digest.`, + HTTPStatusCode: http.StatusBadRequest, + }, + ) + + // ErrCodeSizeInvalid is returned when uploading a blob if the provided + // length does not match the content length. + ErrCodeSizeInvalid = register( + errGroup, ErrorDescriptor{ + Value: "SIZE_INVALID", + Message: "provided length did not match content length", + Description: `When a layer is uploaded, the provided size will be + checked against the uploaded content. If they do not match, this error + will be returned.`, + HTTPStatusCode: http.StatusBadRequest, + }, + ) + + // ErrCodeRangeInvalid is returned when uploading a blob if the provided + // content range is invalid. + ErrCodeRangeInvalid = register( + errGroup, ErrorDescriptor{ + Value: "RANGE_INVALID", + Message: "invalid content range", + Description: `When a layer is uploaded, the provided range is checked + against the uploaded chunk. This error is returned if the range is + out of order.`, + HTTPStatusCode: http.StatusRequestedRangeNotSatisfiable, + }, + ) + + // ErrCodeNameInvalid is returned when the name in the manifest does not + // match the provided name.
+ ErrCodeNameInvalid = register( + errGroup, ErrorDescriptor{ + Value: "NAME_INVALID", + Message: "invalid repository name", + Description: `Invalid repository name encountered either during + manifest validation or any API operation.`, + HTTPStatusCode: http.StatusBadRequest, + }, + ) + + // ErrCodeTagInvalid is returned when the tag in the manifest does not + // match the provided tag. + ErrCodeTagInvalid = register( + errGroup, ErrorDescriptor{ + Value: "TAG_INVALID", + Message: "manifest tag did not match URI", + Description: `During a manifest upload, if the tag in the manifest + does not match the uri tag, this error will be returned.`, + HTTPStatusCode: http.StatusBadRequest, + }, + ) + + // ErrCodeNameUnknown is returned when the repository name is not known. + ErrCodeNameUnknown = register( + errGroup, ErrorDescriptor{ + Value: "NAME_UNKNOWN", + Message: "repository name not known to registry", + Description: `This is returned if the name used during an operation is + unknown to the registry.`, + HTTPStatusCode: http.StatusNotFound, + }, + ) + + // ErrCodeManifestUnknown is returned when the image manifest is unknown. + ErrCodeManifestUnknown = register( + errGroup, ErrorDescriptor{ + Value: "MANIFEST_UNKNOWN", + Message: "manifest unknown", + Description: `This error is returned when the manifest, identified by + name and tag, is unknown to the repository.`, + HTTPStatusCode: http.StatusNotFound, + }, + ) + + // ErrCodeManifestReferencedInList is returned when attempting to delete a manifest that is still referenced by at + // least one manifest list. + ErrCodeManifestReferencedInList = register( + errGroup, ErrorDescriptor{ + Value: "MANIFEST_REFERENCED", + Message: "manifest referenced by a manifest list", + Description: `The manifest is still referenced by at least one manifest list and therefore the delete cannot + proceed.`, + HTTPStatusCode: http.StatusConflict, + }, + ) + + // ErrCodeManifestInvalid is returned when an image manifest is invalid, + // typically during a PUT operation. This error encompasses all errors + // encountered during manifest validation that aren't signature errors. + ErrCodeManifestInvalid = register( + errGroup, ErrorDescriptor{ + Value: "MANIFEST_INVALID", + Message: "manifest invalid", + Description: `During upload, manifests undergo several checks ensuring + validity. If those checks fail, this error may be returned, unless a + more specific error is included. The detail will contain information + about the failed validation.`, + HTTPStatusCode: http.StatusBadRequest, + }, + ) + + // ErrCodeManifestUnverified is returned when the manifest fails + // signature verification. + ErrCodeManifestUnverified = register( + errGroup, ErrorDescriptor{ + Value: "MANIFEST_UNVERIFIED", + Message: "manifest failed signature verification", + Description: `During manifest upload, if the manifest fails signature + verification, this error will be returned.`, + HTTPStatusCode: http.StatusBadRequest, + }, + ) + + // ErrCodeManifestReferenceLimit is returned when a manifest has more + // references than the configured limit. + ErrCodeManifestReferenceLimit = register( + errGroup, ErrorDescriptor{ + Value: "MANIFEST_REFERENCE_LIMIT", + Message: "too many manifest references", + Description: `This error may be returned when a manifest references more than + the configured limit allows.`, + HTTPStatusCode: http.StatusBadRequest, + }, + ) + + // ErrCodeManifestPayloadSizeLimit is returned when a manifest payload is + // bigger than the configured limit.
+ ErrCodeManifestPayloadSizeLimit = register( + errGroup, ErrorDescriptor{ + Value: "MANIFEST_SIZE_LIMIT", + Message: "payload size limit exceeded", + Description: `This error may be returned when a manifest payload size is bigger than + the configured limit allows.`, + HTTPStatusCode: http.StatusBadRequest, + }, + ) + + // ErrCodeManifestBlobUnknown is returned when a manifest blob is + // unknown to the registry. + ErrCodeManifestBlobUnknown = register( + errGroup, ErrorDescriptor{ + Value: "MANIFEST_BLOB_UNKNOWN", + Message: "blob unknown to registry", + Description: `This error may be returned when a manifest blob is + unknown to the registry.`, + HTTPStatusCode: http.StatusBadRequest, + }, + ) + + // ErrCodeBlobUnknown is returned when a blob is unknown to the + // registry. This can happen when the manifest references a nonexistent + // layer or the result is not found by a blob fetch. + ErrCodeBlobUnknown = register( + errGroup, ErrorDescriptor{ + Value: "BLOB_UNKNOWN", + Message: "blob unknown to registry", + Description: `This error may be returned when a blob is unknown to the + registry in a specified repository. This can be returned with a + standard get or if a manifest references an unknown layer during + upload.`, + HTTPStatusCode: http.StatusNotFound, + }, + ) + + // ErrCodeBlobUploadUnknown is returned when an upload is unknown. + ErrCodeBlobUploadUnknown = register( + errGroup, ErrorDescriptor{ + Value: "BLOB_UPLOAD_UNKNOWN", + Message: "blob upload unknown to registry", + Description: `If a blob upload has been cancelled or was never + started, this error code may be returned.`, + HTTPStatusCode: http.StatusNotFound, + }, + ) + + // ErrCodeBlobUploadInvalid is returned when an upload is invalid. + ErrCodeBlobUploadInvalid = register( + errGroup, ErrorDescriptor{ + Value: "BLOB_UPLOAD_INVALID", + Message: "blob upload invalid", + Description: `The blob upload encountered an error and can no + longer proceed.`, + HTTPStatusCode: http.StatusNotFound, + }, + ) + + // ErrCodePaginationNumberInvalid is returned when the `n` parameter is + // not an integer, or `n` is negative. + ErrCodePaginationNumberInvalid = register( + errGroup, ErrorDescriptor{ + Value: "PAGINATION_NUMBER_INVALID", + Message: "invalid number of results requested", + Description: `Returned when the "n" parameter (number of results + to return) is not an integer, "n" is negative or "n" is bigger than + the maximum allowed.`, + HTTPStatusCode: http.StatusBadRequest, + }, + ) +) + +const gitnessErrGroup = "gitness.api.v1" + +var ( + ErrCodeRootNotFound = register( + gitnessErrGroup, ErrorDescriptor{ + Value: "ROOT_NOT_FOUND", + Message: "Root not found", + Description: "The root does not exist", + HTTPStatusCode: http.StatusNotFound, + }, + ) + ErrCodeParentNotFound = register( + gitnessErrGroup, ErrorDescriptor{ + Value: "PARENT_NOT_FOUND", + Message: "Parent not found", + Description: "The parent does not exist", + HTTPStatusCode: http.StatusNotFound, + }, + ) + ErrCodeRegNotFound = register( + gitnessErrGroup, ErrorDescriptor{ + Value: "REGISTRY_NOT_FOUND", + Message: "registry not found", + Description: "The registry does not exist", + HTTPStatusCode: http.StatusNotFound, + }, + ) +) + +var ( + nextCode = 1000 + registerLock sync.Mutex +) + +// Register will make the passed-in error known to the environment and +// return a new ErrorCode. 
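+// +// A minimal sketch of registering a new code (group name and descriptor fields are illustrative): +// +// var ErrCodeExample = Register("example.group", ErrorDescriptor{ +// Value: "EXAMPLE_FAILED", +// Message: "example operation failed", +// Description: "Returned when the illustrative example operation cannot complete.", +// HTTPStatusCode: http.StatusBadRequest, +// }) +// +// Registration panics if the Value or Code is already taken, so it is intended +// to run from package-level var blocks at init time.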
+func Register(group string, descriptor ErrorDescriptor) CodeError { + return register(group, descriptor) +} + +// register will make the passed-in error known to the environment and +// return a new ErrorCode. +func register(group string, descriptor ErrorDescriptor) CodeError { + registerLock.Lock() + defer registerLock.Unlock() + + descriptor.Code = CodeError(nextCode) + + if _, ok := idToDescriptors[descriptor.Value]; ok { + panic(fmt.Sprintf("ErrorValue %q is already registered", descriptor.Value)) + } + if _, ok := errorCodeToDescriptors[descriptor.Code]; ok { + panic(fmt.Sprintf("ErrorCode %v is already registered", descriptor.Code)) + } + + groupToDescriptors[group] = append(groupToDescriptors[group], descriptor) + errorCodeToDescriptors[descriptor.Code] = descriptor + idToDescriptors[descriptor.Value] = descriptor + + nextCode++ + return descriptor.Code +} + +type byValue []ErrorDescriptor + +func (a byValue) Len() int { return len(a) } +func (a byValue) Swap(i, j int) { a[i], a[j] = a[j], a[i] } +func (a byValue) Less(i, j int) bool { return a[i].Value < a[j].Value } + +// GetGroupNames returns the list of Error group names that are registered. +func GetGroupNames() []string { + keys := []string{} + + for k := range groupToDescriptors { + keys = append(keys, k) + } + sort.Strings(keys) + return keys +} + +// GetErrorCodeGroup returns the named group of error descriptors. +func GetErrorCodeGroup(name string) []ErrorDescriptor { + desc := groupToDescriptors[name] + sort.Sort(byValue(desc)) + return desc +} + +// GetErrorAllDescriptors returns a slice of all ErrorDescriptors that are +// registered, irrespective of what group they're in. +func GetErrorAllDescriptors() []ErrorDescriptor { + result := []ErrorDescriptor{} + + for _, group := range GetGroupNames() { + result = append(result, GetErrorCodeGroup(group)...) + } + sort.Sort(byValue(result)) + return result +} + +// FromUnknownError will try to parse an unknown error and infer the appropriate Error to use. 
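+// +// Typical use at an HTTP boundary (illustrative; doStorageWork is hypothetical): +// +// if err := doStorageWork(ctx); err != nil { +// _ = ServeJSON(w, FromUnknownError(err)) +// return +// }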
+func FromUnknownError(err error) Error { + // return if this is an Error already + var e Error + if errors.As(err, &e) { + return e + } + + // if this is a storage driver catch-all error (storagedriver.Error), extract the enclosed error + var sdErr storagedriver.Error + if errors.As(err, &sdErr) { + err = sdErr.Detail + } + + // use 503 Service Unavailable for network timeout errors + var netError net.Error + if ok := errors.As(err, &netError); ok && netError.Timeout() { + return ErrCodeUnavailable.WithDetail(err) + } + + var netOpError *net.OpError + if errors.As(err, &netOpError) { + // use 400 Bad Request if the client drops the connection during the request + var syscallErr *os.SyscallError + if errors.As(err, &syscallErr) && errors.Is(syscallErr.Err, syscall.ECONNRESET) { + return ErrCodeConnectionReset.WithDetail(err) + } + + // use 503 Service Unavailable for network connection refused or unknown host errors + return ErrCodeUnavailable.WithDetail(err) + } + + // use 400 Bad Request for canceled requests + if errors.Is(err, context.Canceled) { + return ErrCodeRequestCanceled.WithDetail(err) + } + + // use 503 Service Unavailable for database connection failures + var pgErr *pgconn.PgError + if errors.As(err, &pgErr) && pgerrcode.IsConnectionException(pgErr.Code) { + return ErrCodeUnavailable.WithDetail(err) + } + + // propagate a 503 Service Unavailable status from the storage backends + var gcsErr *googleapi.Error + if errors.As(err, &gcsErr) { + if gcsErr.Code == http.StatusServiceUnavailable { + return ErrCodeUnavailable.WithDetail(gcsErr.Error()) + } + } + + // otherwise, we're not sure what the error is or how to react, use 500 Internal Server Error + return ErrCodeUnknown.WithDetail(err) +} diff --git a/registry/app/dist_temp/requestutil/util.go b/registry/app/dist_temp/requestutil/util.go new file mode 100644 index 000000000..20d12332c --- /dev/null +++ b/registry/app/dist_temp/requestutil/util.go @@ -0,0 +1,67 @@ +// Source: https://github.com/distribution/distribution + +// Copyright 2014 https://github.com/distribution/distribution Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package requestutil + +import ( + "net" + "net/http" + "strings" + + "github.com/rs/zerolog/log" +) + +func parseIP(ipStr string) net.IP { + ip := net.ParseIP(ipStr) + if ip == nil { + log.Warn().Msgf("invalid remote IP address: %q", ipStr) + } + return ip +} + +// RemoteAddr extracts the remote address of the request, taking into +// account proxy headers. 
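+// +// For example (illustrative): with "X-Forwarded-For: 1.2.3.4, 10.0.0.1" this +// returns "1.2.3.4"; with no valid proxy headers it falls back to r.RemoteAddr.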
+func RemoteAddr(r *http.Request) string { + if prior := r.Header.Get("X-Forwarded-For"); prior != "" { + remoteAddr, _, _ := strings.Cut(prior, ",") + remoteAddr = strings.Trim(remoteAddr, " ") + if parseIP(remoteAddr) != nil { + return remoteAddr + } + } + // X-Real-Ip is less supported, but worth checking in the + // absence of X-Forwarded-For + if realIP := r.Header.Get("X-Real-Ip"); realIP != "" { + if parseIP(realIP) != nil { + return realIP + } + } + + return r.RemoteAddr +} + +// RemoteIP extracts the remote IP of the request, taking into +// account proxy headers. +func RemoteIP(r *http.Request) string { + addr := RemoteAddr(r) + + // Try parsing it as "IP:port" + if ip, _, err := net.SplitHostPort(addr); err == nil { + return ip + } + + return addr +} diff --git a/registry/app/dist_temp/requestutil/util_test.go b/registry/app/dist_temp/requestutil/util_test.go new file mode 100644 index 000000000..f8960883e --- /dev/null +++ b/registry/app/dist_temp/requestutil/util_test.go @@ -0,0 +1,96 @@ +// Source: https://github.com/distribution/distribution + +// Copyright 2014 https://github.com/distribution/distribution Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package requestutil + +import ( + "net/http" + "net/http/httptest" + "net/http/httputil" + "net/url" + "testing" +) + +// SingleHostReverseProxy will insert an X-Forwarded-For header, and can be used to test +// RemoteAddr(). A fake RemoteAddr cannot be set on the HTTP request - it is overwritten +// at the transport layer to 127.0.0.1:<port>. However, as the X-Forwarded-For header +// just contains the IP address, it is different enough for testing.
+func TestRemoteAddr(t *testing.T) { + var expectedRemote string + backend := httptest.NewServer( + http.HandlerFunc( + func(w http.ResponseWriter, r *http.Request) { + defer r.Body.Close() + + if r.RemoteAddr == expectedRemote { + t.Errorf("Unexpected matching remote addresses") + } + + actualRemote := RemoteAddr(r) + if expectedRemote != actualRemote { + t.Errorf("Mismatching remote hosts: %v != %v", expectedRemote, actualRemote) + } + + w.WriteHeader(http.StatusOK) + }, + ), + ) + + defer backend.Close() + backendURL, err := url.Parse(backend.URL) + if err != nil { + t.Fatal(err) + } + + proxy := httputil.NewSingleHostReverseProxy(backendURL) + frontend := httptest.NewServer(proxy) + defer frontend.Close() + + // X-Forwarded-For set by proxy + expectedRemote = "127.0.0.1" + proxyReq, err := http.NewRequest(http.MethodGet, frontend.URL, nil) + if err != nil { + t.Fatal(err) + } + + resp, err := http.DefaultClient.Do(proxyReq) + if err != nil { + t.Fatal(err) + } + defer resp.Body.Close() + + // RemoteAddr in X-Real-Ip + getReq, err := http.NewRequest(http.MethodGet, backend.URL, nil) + if err != nil { + t.Fatal(err) + } + + expectedRemote = "1.2.3.4" + getReq.Header["X-Real-ip"] = []string{expectedRemote} + resp, err = http.DefaultClient.Do(getReq) + if err != nil { + t.Fatal(err) + } + defer resp.Body.Close() + + // Valid X-Real-Ip and invalid X-Forwarded-For + getReq.Header["X-forwarded-for"] = []string{"1.2.3"} + resp, err = http.DefaultClient.Do(getReq) + if err != nil { + t.Fatal(err) + } + defer resp.Body.Close() +} diff --git a/registry/app/driver/base/base.go b/registry/app/driver/base/base.go new file mode 100644 index 000000000..161f907e1 --- /dev/null +++ b/registry/app/driver/base/base.go @@ -0,0 +1,243 @@ +// Source: https://github.com/distribution/distribution + +// Copyright 2014 https://github.com/distribution/distribution Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +// Package base provides a base implementation of the storage driver that can +// be used to implement common checks. The goal is to increase the amount of +// code sharing. +// +// The canonical approach to use this class is to embed in the exported driver +// struct such that calls are proxied through this implementation. First, +// declare the internal driver, as follows: +// +// type driver struct { ... internal ...} +// +// The resulting type should implement StorageDriver such that it can be the +// target of a Base struct. The exported type can then be declared as follows: +// +// type Driver struct { +// Base +// } +// +// Because Driver embeds Base, it effectively implements Base. If the driver +// needs to intercept a call, before going to base, Driver should implement +// that method. Effectively, Driver can intercept calls before coming in and +// driver implements the actual logic. 
+// +// To further shield the embed from other packages, it is recommended to +// employ a private embed struct: +// +// type baseEmbed struct { +// base.Base +// } +// +// Then, declare driver to embed baseEmbed, rather than Base directly: +// +// type Driver struct { +// baseEmbed +// } +// +// The type now implements StorageDriver, proxying through Base, without +// exporting an unnecessary field. +package base + +import ( + "context" + "errors" + "io" + + "github.com/harness/gitness/registry/app/dist_temp/dcontext" + "github.com/harness/gitness/registry/app/driver" + + "github.com/rs/zerolog/log" +) + +func init() { + +} + +// Base provides a wrapper around a storagedriver implementation that provides +// common path and bounds checking. +type Base struct { + driver.StorageDriver +} + +// Format errors received from the storage driver. +func (base *Base) setDriverName(e error) error { + if e == nil { + return nil + } + switch { + case errors.As(e, &driver.UnsupportedMethodError{}): + var e1 driver.UnsupportedMethodError + errors.As(e, &e1) + e1.DriverName = base.StorageDriver.Name() + return e1 + case errors.As(e, &driver.PathNotFoundError{}): + var e2 driver.PathNotFoundError + errors.As(e, &e2) + e2.DriverName = base.StorageDriver.Name() + return e2 + case errors.As(e, &driver.InvalidPathError{}): + var e3 driver.InvalidPathError + errors.As(e, &e3) + e3.DriverName = base.StorageDriver.Name() + return e3 + case errors.As(e, &driver.InvalidOffsetError{}): + var e4 driver.InvalidOffsetError + errors.As(e, &e4) + e4.DriverName = base.StorageDriver.Name() + return e4 + default: + return driver.Error{ + DriverName: base.StorageDriver.Name(), + Detail: e, + } + } +} + +// GetContent wraps GetContent of underlying storage driver. +func (base *Base) GetContent(ctx context.Context, path string) ([]byte, error) { + ctx, done := dcontext.WithTrace(ctx) + defer done("%s.GetContent(%q)", base.Name(), path) + + if !driver.PathRegexp.MatchString(path) { + return nil, driver.InvalidPathError{Path: path, DriverName: base.StorageDriver.Name()} + } + + b, e := base.StorageDriver.GetContent(ctx, path) + return b, base.setDriverName(e) +} + +// PutContent wraps PutContent of underlying storage driver. +func (base *Base) PutContent(ctx context.Context, path string, content []byte) error { + ctx, done := dcontext.WithTrace(ctx) + defer done("%s.PutContent(%q)", base.Name(), path) + + if !driver.PathRegexp.MatchString(path) { + return driver.InvalidPathError{Path: path, DriverName: base.StorageDriver.Name()} + } + + err := base.setDriverName(base.StorageDriver.PutContent(ctx, path, content)) + return err +} + +// Reader wraps Reader of underlying storage driver. +func (base *Base) Reader(ctx context.Context, path string, offset int64) (io.ReadCloser, error) { + ctx, done := dcontext.WithTrace(ctx) + defer done("%s.Reader(%q, %d)", base.Name(), path, offset) + + if offset < 0 { + return nil, driver.InvalidOffsetError{Path: path, Offset: offset, DriverName: base.StorageDriver.Name()} + } + + if !driver.PathRegexp.MatchString(path) { + return nil, driver.InvalidPathError{Path: path, DriverName: base.StorageDriver.Name()} + } + + rc, e := base.StorageDriver.Reader(ctx, path, offset) + return rc, base.setDriverName(e) +} + +// Writer wraps Writer of underlying storage driver. 
+func (base *Base) Writer(ctx context.Context, path string, a bool) (driver.FileWriter, error) {
+	ctx, done := dcontext.WithTrace(ctx)
+	defer done("%s.Writer(%q, %v)", base.Name(), path, a)
+
+	if !driver.PathRegexp.MatchString(path) {
+		return nil, driver.InvalidPathError{Path: path, DriverName: base.StorageDriver.Name()}
+	}
+
+	writer, e := base.StorageDriver.Writer(ctx, path, a)
+	return writer, base.setDriverName(e)
+}
+
+// Stat wraps Stat of underlying storage driver.
+func (base *Base) Stat(ctx context.Context, path string) (driver.FileInfo, error) {
+	ctx, done := dcontext.WithTrace(ctx)
+	defer done("%s.Stat(%q)", base.Name(), path)
+
+	if !driver.PathRegexp.MatchString(path) && path != "/" {
+		return nil, driver.InvalidPathError{Path: path, DriverName: base.StorageDriver.Name()}
+	}
+
+	fi, e := base.StorageDriver.Stat(ctx, path)
+	return fi, base.setDriverName(e)
+}
+
+// List wraps List of underlying storage driver.
+func (base *Base) List(ctx context.Context, path string) ([]string, error) {
+	ctx, done := dcontext.WithTrace(ctx)
+	defer done("%s.List(%q)", base.Name(), path)
+
+	if !driver.PathRegexp.MatchString(path) && path != "/" {
+		return nil, driver.InvalidPathError{Path: path, DriverName: base.StorageDriver.Name()}
+	}
+
+	str, e := base.StorageDriver.List(ctx, path)
+	return str, base.setDriverName(e)
+}
+
+// Move wraps Move of underlying storage driver.
+func (base *Base) Move(ctx context.Context, sourcePath string, destPath string) error {
+	ctx, done := dcontext.WithTrace(ctx)
+	defer done("%s.Move(%q, %q)", base.Name(), sourcePath, destPath)
+
+	if !driver.PathRegexp.MatchString(sourcePath) {
+		return driver.InvalidPathError{Path: sourcePath, DriverName: base.StorageDriver.Name()}
+	} else if !driver.PathRegexp.MatchString(destPath) {
+		return driver.InvalidPathError{Path: destPath, DriverName: base.StorageDriver.Name()}
+	}
+
+	err := base.setDriverName(base.StorageDriver.Move(ctx, sourcePath, destPath))
+	return err
+}
+
+// Delete wraps Delete of underlying storage driver.
+func (base *Base) Delete(ctx context.Context, path string) error {
+	ctx, done := dcontext.WithTrace(ctx)
+	defer done("%s.Delete(%q)", base.Name(), path)
+
+	if !driver.PathRegexp.MatchString(path) {
+		return driver.InvalidPathError{Path: path, DriverName: base.StorageDriver.Name()}
+	}
+
+	err := base.setDriverName(base.StorageDriver.Delete(ctx, path))
+	return err
+}
+
+// RedirectURL wraps RedirectURL of the underlying storage driver.
+func (base *Base) RedirectURL(ctx context.Context, method string, path string) (string, error) {
+	log.Ctx(ctx).Info().Msgf("RedirectURL(%q, %q)", method, path)
+	if !driver.PathRegexp.MatchString(path) {
+		return "", driver.InvalidPathError{Path: path, DriverName: base.StorageDriver.Name()}
+	}
+
+	str, e := base.StorageDriver.RedirectURL(ctx, method, path)
+	log.Ctx(ctx).Info().Msgf("Redirect URL generated %s", str)
+	return str, base.setDriverName(e)
+}
+
+// Walk wraps Walk of underlying storage driver.
+func (base *Base) Walk(ctx context.Context, path string, f driver.WalkFn, options ...func(*driver.WalkOptions)) error { + ctx, done := dcontext.WithTrace(ctx) + defer done("%s.Walk(%q)", base.Name(), path) + + if !driver.PathRegexp.MatchString(path) && path != "/" { + return driver.InvalidPathError{Path: path, DriverName: base.StorageDriver.Name()} + } + + return base.setDriverName(base.StorageDriver.Walk(ctx, path, f, options...)) +} diff --git a/registry/app/driver/base/regulator.go b/registry/app/driver/base/regulator.go new file mode 100644 index 000000000..307f396b0 --- /dev/null +++ b/registry/app/driver/base/regulator.go @@ -0,0 +1,196 @@ +// Source: https://github.com/distribution/distribution + +// Copyright 2014 https://github.com/distribution/distribution Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package base + +import ( + "context" + "fmt" + "io" + "reflect" + "strconv" + "sync" + + storagedriver "github.com/harness/gitness/registry/app/driver" +) + +type regulator struct { + storagedriver.StorageDriver + *sync.Cond + + available uint64 +} + +// GetLimitFromParameter takes an interface type as decoded from the YAML +// configuration and returns a uint64 representing the maximum number of +// concurrent calls given a minimum limit and default. +// +// If the parameter supplied is of an invalid type this returns an error. +func GetLimitFromParameter(param interface{}, min, def uint64) (uint64, error) { + limit := def + + switch v := param.(type) { + case string: + var err error + if limit, err = strconv.ParseUint(v, 0, 64); err != nil { + return limit, fmt.Errorf("parameter must be an integer, '%v' invalid", param) + } + case uint64: + limit = v + case int, int32, int64: + val := reflect.ValueOf(v).Convert(reflect.TypeOf(param)).Int() + // if param is negative casting to uint64 will wrap around and + // give you the hugest thread limit ever. Let's be sensible, here + if val > 0 { + limit = uint64(val) + } else { + limit = min + } + case uint, uint32: + limit = reflect.ValueOf(v).Convert(reflect.TypeOf(param)).Uint() + case nil: + // use the default + default: + return 0, fmt.Errorf("invalid value '%#v'", param) + } + + if limit < min { + return min, nil + } + + return limit, nil +} + +// NewRegulator wraps the given driver and is used to regulate concurrent calls +// to the given storage driver to a maximum of the given limit. This is useful +// for storage drivers that would otherwise create an unbounded number of OS +// threads if allowed to be called unregulated. 
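+//
+// A minimal usage sketch (the limit of 100 is illustrative):
+//
+//	var inner storagedriver.StorageDriver // some concrete driver
+//	limited := NewRegulator(inner, 100)   // at most 100 concurrent calls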
+func NewRegulator(driver storagedriver.StorageDriver, limit uint64) storagedriver.StorageDriver { + return ®ulator{ + StorageDriver: driver, + Cond: sync.NewCond(&sync.Mutex{}), + available: limit, + } +} + +func (r *regulator) enter() { + r.L.Lock() + for r.available == 0 { + r.Wait() + } + r.available-- + r.L.Unlock() +} + +func (r *regulator) exit() { + r.L.Lock() + r.Signal() + r.available++ + r.L.Unlock() +} + +// Name returns the human-readable "name" of the driver, useful in error +// messages and logging. By convention, this will just be the registration +// name, but drivers may provide other information here. +func (r *regulator) Name() string { + r.enter() + defer r.exit() + + return r.StorageDriver.Name() +} + +// GetContent retrieves the content stored at "path" as a []byte. +// This should primarily be used for small objects. +func (r *regulator) GetContent(ctx context.Context, path string) ([]byte, error) { + r.enter() + defer r.exit() + + return r.StorageDriver.GetContent(ctx, path) +} + +// PutContent stores the []byte content at a location designated by "path". +// This should primarily be used for small objects. +func (r *regulator) PutContent(ctx context.Context, path string, content []byte) error { + r.enter() + defer r.exit() + + return r.StorageDriver.PutContent(ctx, path, content) +} + +// Reader retrieves an io.ReadCloser for the content stored at "path" +// with a given byte offset. +// May be used to resume reading a stream by providing a nonzero offset. +func (r *regulator) Reader(ctx context.Context, path string, offset int64) (io.ReadCloser, error) { + r.enter() + defer r.exit() + + return r.StorageDriver.Reader(ctx, path, offset) +} + +// Writer stores the contents of the provided io.ReadCloser at a +// location designated by the given path. +// May be used to resume writing a stream by providing a nonzero offset. +// The offset must be no larger than the CurrentSize for this path. +func (r *regulator) Writer(ctx context.Context, path string, a bool) (storagedriver.FileWriter, error) { + r.enter() + defer r.exit() + + return r.StorageDriver.Writer(ctx, path, a) +} + +// Stat retrieves the FileInfo for the given path, including the current +// size in bytes and the creation time. +func (r *regulator) Stat(ctx context.Context, path string) (storagedriver.FileInfo, error) { + r.enter() + defer r.exit() + + return r.StorageDriver.Stat(ctx, path) +} + +// List returns a list of the objects that are direct descendants of the +// given path. +func (r *regulator) List(ctx context.Context, path string) ([]string, error) { + r.enter() + defer r.exit() + + return r.StorageDriver.List(ctx, path) +} + +// Move moves an object stored at sourcePath to destPath, removing the +// original object. +func (r *regulator) Move(ctx context.Context, sourcePath string, destPath string) error { + r.enter() + defer r.exit() + + return r.StorageDriver.Move(ctx, sourcePath, destPath) +} + +// Delete recursively deletes all objects stored at "path" and its subpaths. +func (r *regulator) Delete(ctx context.Context, path string) error { + r.enter() + defer r.exit() + + return r.StorageDriver.Delete(ctx, path) +} + +// RedirectURL returns a URL which may be used to retrieve the content stored at +// the given path. 
+func (r *regulator) RedirectURL(ctx context.Context, method string, path string) (string, error) { + r.enter() + defer r.exit() + + return r.StorageDriver.RedirectURL(ctx, method, path) +} diff --git a/registry/app/driver/base/regulator_test.go b/registry/app/driver/base/regulator_test.go new file mode 100644 index 000000000..fefadd114 --- /dev/null +++ b/registry/app/driver/base/regulator_test.go @@ -0,0 +1,120 @@ +// Source: https://github.com/distribution/distribution + +// Copyright 2014 https://github.com/distribution/distribution Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package base + +import ( + "fmt" + "sync" + "testing" + "time" +) + +func TestRegulatorEnterExit(t *testing.T) { + const limit = 500 + + r, ok := NewRegulator(nil, limit).(*regulator) + if !ok { + t.Fatalf("Error: r is not of type *regulator") + return + } + + for try := 0; try < 50; try++ { + run := make(chan struct{}) + + var firstGroupReady sync.WaitGroup + var firstGroupDone sync.WaitGroup + firstGroupReady.Add(limit) + firstGroupDone.Add(limit) + for i := 0; i < limit; i++ { + go func() { + r.enter() + firstGroupReady.Done() + <-run + r.exit() + firstGroupDone.Done() + }() + } + firstGroupReady.Wait() + + // now we exhausted all the limit, let's run a little bit more + var secondGroupReady sync.WaitGroup + var secondGroupDone sync.WaitGroup + for i := 0; i < 50; i++ { + secondGroupReady.Add(1) + secondGroupDone.Add(1) + go func() { + secondGroupReady.Done() + r.enter() + r.exit() + secondGroupDone.Done() + }() + } + secondGroupReady.Wait() + + // allow the first group to return resources + close(run) + + done := make(chan struct{}) + go func() { + secondGroupDone.Wait() + close(done) + }() + select { + case <-done: + case <-time.After(5 * time.Second): + t.Fatal("some r.enter() are still locked") + } + + firstGroupDone.Wait() + + if r.available != limit { + t.Fatalf("r.available: got %d, want %d", r.available, limit) + } + } +} + +func TestGetLimitFromParameter(t *testing.T) { + tests := []struct { + Input interface{} + Expected uint64 + Min uint64 + Default uint64 + Err error + }{ + {"foo", 0, 5, 5, fmt.Errorf("parameter must be an integer, 'foo' invalid")}, + {"50", 50, 5, 5, nil}, + {"5", 25, 25, 50, nil}, // lower than Min returns Min + {nil, 50, 25, 50, nil}, // nil returns default + {812, 812, 25, 50, nil}, + } + + for _, item := range tests { + t.Run( + fmt.Sprint(item.Input), func(t *testing.T) { + actual, err := GetLimitFromParameter(item.Input, item.Min, item.Default) + + if err != nil && item.Err != nil && err.Error() != item.Err.Error() { + t.Fatalf("GetLimitFromParameter error, expected %#v got %#v", item.Err, err) + } + + if actual != item.Expected { + t.Fatalf("GetLimitFromParameter result error, expected %d got %d", item.Expected, actual) + } + }, + ) + } +} diff --git a/registry/app/driver/factory/factory.go b/registry/app/driver/factory/factory.go new file mode 100644 index 000000000..19d18ec01 --- /dev/null +++ b/registry/app/driver/factory/factory.go @@ -0,0 +1,80 @@ 
+// Source: https://github.com/distribution/distribution + +// Copyright 2014 https://github.com/distribution/distribution Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package factory + +import ( + "fmt" + + "github.com/harness/gitness/registry/app/driver" +) + +// driverFactories stores an internal mapping between storage driver names and their respective +// factories. +var driverFactories = make(map[string]StorageDriverFactory) + +// StorageDriverFactory is a factory interface for creating storagedriver.StorageDriver interfaces +// Storage drivers should call Register() with a factory to make the driver available by name. +// Individual StorageDriver implementations generally register with the factory via the Register +// func (below) in their init() funcs, and as such they should be imported anonymously before use. +// See below for an example of how to register and get a StorageDriver for S3 +// +// import _ "github.com/distribution/distribution/v3/registry/storage/driver/s3-aws" +// s3Driver, err = factory.Create("s3", storageParams) +// // assuming no error, s3Driver is the StorageDriver that communicates with S3 according to storageParams +type StorageDriverFactory interface { + // Create returns a new storagedriver.StorageDriver with the given parameters + // Parameters will vary by driver and may be ignored + // Each parameter key must only consist of lowercase letters and numbers + Create(parameters map[string]interface{}) (driver.StorageDriver, error) +} + +// Register makes a storage driver available by the provided name. +// If Register is called twice with the same name or if driver factory is nil, it panics. +// Additionally, it is not concurrency safe. Most Storage Drivers call this function +// in their init() functions. See the documentation for StorageDriverFactory for more. +func Register(name string, factory StorageDriverFactory) { + if factory == nil { + panic("Must not provide nil StorageDriverFactory") + } + _, registered := driverFactories[name] + if registered { + panic(fmt.Sprintf("StorageDriverFactory named %s already registered", name)) + } + + driverFactories[name] = factory +} + +// Create a new storagedriver.StorageDriver with the given name and +// parameters. To use a driver, the StorageDriverFactory must first be +// registered with the given name. If no drivers are found, an +// InvalidStorageDriverError is returned. +func Create(name string, parameters map[string]interface{}) (driver.StorageDriver, error) { + driverFactory, ok := driverFactories[name] + if !ok { + return nil, InvalidStorageDriverError{name} + } + return driverFactory.Create(parameters) +} + +// InvalidStorageDriverError records an attempt to construct an unregistered storage driver. 
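+// For example (sketch; "mem" is a hypothetical name that was never
+// registered):
+//
+//	if _, err := Create("mem", nil); err != nil {
+//		var e InvalidStorageDriverError
+//		_ = errors.As(err, &e) // true, with e.Name == "mem"
+//	}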
+type InvalidStorageDriverError struct { + Name string +} + +func (err InvalidStorageDriverError) Error() string { + return fmt.Sprintf("StorageDriver not registered: %s", err.Name) +} diff --git a/registry/app/driver/fileinfo.go b/registry/app/driver/fileinfo.go new file mode 100644 index 000000000..b7a283dd7 --- /dev/null +++ b/registry/app/driver/fileinfo.go @@ -0,0 +1,93 @@ +// Source: https://github.com/distribution/distribution + +// Copyright 2014 https://github.com/distribution/distribution Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package driver + +import "time" + +// FileInfo returns information about a given path. Inspired by os.FileInfo, +// it elides the base name method for a full path instead. +type FileInfo interface { + // Path provides the full path of the target of this file info. + Path() string + + // Size returns current length in bytes of the file. The return value can + // be used to write to the end of the file at path. The value is + // meaningless if IsDir returns true. + Size() int64 + + // ModTime returns the modification time for the file. For backends that + // don't have a modification time, the creation time should be returned. + ModTime() time.Time + + // IsDir returns true if the path is a directory. + IsDir() bool +} + +// FileInfoFields provides the exported fields for implementing FileInfo +// interface in storagedriver implementations. It should be used with +// InternalFileInfo. +type FileInfoFields struct { + // Path provides the full path of the target of this file info. + Path string + + // Size is current length in bytes of the file. The value of this field + // can be used to write to the end of the file at path. The value is + // meaningless if IsDir is set to true. + Size int64 + + // ModTime returns the modification time for the file. For backends that + // don't have a modification time, the creation time should be returned. + ModTime time.Time + + // IsDir returns true if the path is a directory. + IsDir bool +} + +// FileInfoInternal implements the FileInfo interface. This should only be +// used by storagedriver implementations that don't have a specialized +// FileInfo type. +type FileInfoInternal struct { + FileInfoFields +} + +var ( + _ FileInfo = FileInfoInternal{} + _ FileInfo = &FileInfoInternal{} +) + +// Path provides the full path of the target of this file info. +func (fi FileInfoInternal) Path() string { + return fi.FileInfoFields.Path +} + +// Size returns current length in bytes of the file. The return value can +// be used to write to the end of the file at path. The value is +// meaningless if IsDir returns true. +func (fi FileInfoInternal) Size() int64 { + return fi.FileInfoFields.Size +} + +// ModTime returns the modification time for the file. For backends that +// don't have a modification time, the creation time should be returned. +func (fi FileInfoInternal) ModTime() time.Time { + return fi.FileInfoFields.ModTime +} + +// IsDir returns true if the path is a directory. 
+func (fi FileInfoInternal) IsDir() bool { + return fi.FileInfoFields.IsDir +} diff --git a/registry/app/driver/filesystem/driver.go b/registry/app/driver/filesystem/driver.go new file mode 100644 index 000000000..633bad195 --- /dev/null +++ b/registry/app/driver/filesystem/driver.go @@ -0,0 +1,453 @@ +// Source: https://github.com/distribution/distribution + +// Copyright 2014 https://github.com/distribution/distribution Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package filesystem + +import ( + "bufio" + "bytes" + "context" + "errors" + "fmt" + "io" + "os" + "path" + "time" + + storagedriver "github.com/harness/gitness/registry/app/driver" + "github.com/harness/gitness/registry/app/driver/base" + "github.com/harness/gitness/registry/app/driver/factory" + + "github.com/rs/zerolog/log" +) + +const ( + driverName = "filesystem" + defaultRootDirectory = "/var/lib/registry" + defaultMaxThreads = uint64(100) + + // minThreads is the minimum value for the maxthreads configuration + // parameter. If the driver's parameters are less than this we set + // the parameters to minThreads. + minThreads = uint64(25) +) + +func GetDriverName() string { + return driverName +} + +// DriverParameters represents all configuration options available for the +// filesystem driver. +type DriverParameters struct { + RootDirectory string + MaxThreads uint64 +} + +// TODO: figure-out why init is not called automatically +func Register() { + log.Info().Msgf("registering filesystem driver") +} + +func init() { + factory.Register(driverName, &filesystemDriverFactory{}) +} + +// filesystemDriverFactory implements the factory.StorageDriverFactory interface. +type filesystemDriverFactory struct{} + +func (factory *filesystemDriverFactory) Create(parameters map[string]interface{}) (storagedriver.StorageDriver, error) { + return FromParameters(parameters) +} + +type driver struct { + rootDirectory string +} + +type baseEmbed struct { + base.Base +} + +// Driver is a storagedriver.StorageDriver implementation backed by a local +// filesystem. All provided paths will be subpaths of the RootDirectory. +type Driver struct { + baseEmbed +} + +// FromParameters constructs a new Driver with a given parameters map +// Optional Parameters: +// - rootdirectory +// - maxthreads. 
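+//
+// For example (values are illustrative; both keys are optional):
+//
+//	d, err := FromParameters(map[string]interface{}{
+//		"rootdirectory": "/var/lib/registry",
+//		"maxthreads":    "64",
+//	})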
+func FromParameters(parameters map[string]interface{}) (*Driver, error) { + params, err := fromParametersImpl(parameters) + if err != nil || params == nil { + return nil, err + } + return New(*params), nil +} + +func fromParametersImpl(parameters map[string]interface{}) (*DriverParameters, error) { + var ( + err error + maxThreads = defaultMaxThreads + rootDirectory = defaultRootDirectory + ) + + if parameters != nil { + if rootDir, ok := parameters["rootdirectory"]; ok { + rootDirectory = fmt.Sprint(rootDir) + } + + maxThreads, err = base.GetLimitFromParameter(parameters["maxthreads"], minThreads, defaultMaxThreads) + if err != nil { + return nil, fmt.Errorf("maxthreads config error: %s", err.Error()) + } + } + + params := &DriverParameters{ + RootDirectory: rootDirectory, + MaxThreads: maxThreads, + } + return params, nil +} + +// New constructs a new Driver with a given rootDirectory. +func New(params DriverParameters) *Driver { + fsDriver := &driver{rootDirectory: params.RootDirectory} + + return &Driver{ + baseEmbed: baseEmbed{ + Base: base.Base{ + StorageDriver: base.NewRegulator(fsDriver, params.MaxThreads), + }, + }, + } +} + +// Implement the storagedriver.StorageDriver interface + +func (d *driver) Name() string { + return driverName +} + +// GetContent retrieves the content stored at "path" as a []byte. +func (d *driver) GetContent(ctx context.Context, path string) ([]byte, error) { + rc, err := d.Reader(ctx, path, 0) + if err != nil { + return nil, err + } + defer rc.Close() + + p, err := io.ReadAll(rc) + if err != nil { + return nil, err + } + + return p, nil +} + +// PutContent stores the []byte content at a location designated by "path". +func (d *driver) PutContent(ctx context.Context, subPath string, contents []byte) error { + writer, err := d.Writer(ctx, subPath, false) + if err != nil { + return err + } + defer writer.Close() + _, err = io.Copy(writer, bytes.NewReader(contents)) + if err != nil { + if cErr := writer.Cancel(ctx); cErr != nil { + return errors.Join(err, cErr) + } + return err + } + return writer.Commit(ctx) +} + +// Reader retrieves an io.ReadCloser for the content stored at "path" with a +// given byte offset. +func (d *driver) Reader(_ context.Context, path string, offset int64) (io.ReadCloser, error) { + file, err := os.OpenFile(d.fullPath(path), os.O_RDONLY, 0o644) + if err != nil { + if os.IsNotExist(err) { + return nil, storagedriver.PathNotFoundError{Path: path} + } + + return nil, err + } + + seekPos, err := file.Seek(offset, io.SeekStart) + if err != nil { + file.Close() + return nil, err + } else if seekPos < offset { + file.Close() + return nil, storagedriver.InvalidOffsetError{Path: path, Offset: offset} + } + + return file, nil +} + +func (d *driver) Writer(_ context.Context, subPath string, appendFlag bool) (storagedriver.FileWriter, error) { + fullPath := d.fullPath(subPath) + parentDir := path.Dir(fullPath) + if err := os.MkdirAll(parentDir, 0o777); err != nil { + return nil, err + } + + fp, err := os.OpenFile(fullPath, os.O_WRONLY|os.O_CREATE, 0o666) + if err != nil { + return nil, err + } + + var offset int64 + + if !appendFlag { + err := fp.Truncate(0) + if err != nil { + fp.Close() + return nil, err + } + } else { + n, err := fp.Seek(0, io.SeekEnd) + if err != nil { + fp.Close() + return nil, err + } + offset = n + } + + return newFileWriter(fp, offset), nil +} + +// Stat retrieves the FileInfo for the given path, including the current size +// in bytes and the creation time. 
+func (d *driver) Stat(_ context.Context, subPath string) (storagedriver.FileInfo, error) { + fullPath := d.fullPath(subPath) + + fi, err := os.Stat(fullPath) + if err != nil { + if os.IsNotExist(err) { + return nil, storagedriver.PathNotFoundError{Path: subPath} + } + + return nil, err + } + + return fileInfo{ + path: subPath, + FileInfo: fi, + }, nil +} + +// List returns a list of the objects that are direct descendants of the given +// path. +func (d *driver) List(_ context.Context, subPath string) ([]string, error) { + fullPath := d.fullPath(subPath) + + dir, err := os.Open(fullPath) + if err != nil { + if os.IsNotExist(err) { + return nil, storagedriver.PathNotFoundError{Path: subPath} + } + return nil, err + } + + defer dir.Close() + + fileNames, err := dir.Readdirnames(0) + if err != nil { + return nil, err + } + + keys := make([]string, 0, len(fileNames)) + for _, fileName := range fileNames { + keys = append(keys, path.Join(subPath, fileName)) + } + + return keys, nil +} + +// Move moves an object stored at sourcePath to destPath, removing the original +// object. +func (d *driver) Move(_ context.Context, sourcePath string, destPath string) error { + source := d.fullPath(sourcePath) + dest := d.fullPath(destPath) + + if _, err := os.Stat(source); os.IsNotExist(err) { + return storagedriver.PathNotFoundError{Path: sourcePath} + } + + if err := os.MkdirAll(path.Dir(dest), 0o777); err != nil { + return err + } + + err := os.Rename(source, dest) + return err +} + +// Delete recursively deletes all objects stored at "path" and its subpaths. +func (d *driver) Delete(_ context.Context, subPath string) error { + fullPath := d.fullPath(subPath) + + _, err := os.Stat(fullPath) + if err != nil && !os.IsNotExist(err) { + return err + } else if err != nil { + return storagedriver.PathNotFoundError{Path: subPath} + } + + err = os.RemoveAll(fullPath) + return err +} + +// RedirectURL returns a URL which may be used to retrieve the content stored at the given path. +func (d *driver) RedirectURL(_ context.Context, _ string, _ string) (string, error) { + return "", nil +} + +// Walk traverses a filesystem defined within driver, starting +// from the given path, calling f on each file and directory. +func (d *driver) Walk( + ctx context.Context, + path string, + f storagedriver.WalkFn, + options ...func(*storagedriver.WalkOptions), +) error { + return storagedriver.WalkFallback(ctx, d, path, f, options...) +} + +// fullPath returns the absolute path of a key within the Driver's storage. +func (d *driver) fullPath(subPath string) string { + return path.Join(d.rootDirectory, subPath) +} + +type fileInfo struct { + os.FileInfo + path string +} + +var _ storagedriver.FileInfo = fileInfo{} + +// Path provides the full path of the target of this file info. +func (fi fileInfo) Path() string { + return fi.path +} + +// Size returns current length in bytes of the file. The return value can +// be used to write to the end of the file at path. The value is +// meaningless if IsDir returns true. +func (fi fileInfo) Size() int64 { + if fi.IsDir() { + return 0 + } + + return fi.FileInfo.Size() +} + +// ModTime returns the modification time for the file. For backends that +// don't have a modification time, the creation time should be returned. +func (fi fileInfo) ModTime() time.Time { + return fi.FileInfo.ModTime() +} + +// IsDir returns true if the path is a directory. 
+func (fi fileInfo) IsDir() bool { + return fi.FileInfo.IsDir() +} + +type fileWriter struct { + file *os.File + size int64 + bw *bufio.Writer + closed bool + committed bool + cancelled bool +} + +func newFileWriter(file *os.File, size int64) *fileWriter { + return &fileWriter{ + file: file, + size: size, + bw: bufio.NewWriter(file), + } +} + +func (fw *fileWriter) Write(p []byte) (int, error) { + if fw.closed { + return 0, fmt.Errorf("already closed") + } else if fw.committed { + return 0, fmt.Errorf("already committed") + } else if fw.cancelled { + return 0, fmt.Errorf("already cancelled") + } + n, err := fw.bw.Write(p) + fw.size += int64(n) + return n, err +} + +func (fw *fileWriter) Size() int64 { + return fw.size +} + +func (fw *fileWriter) Close() error { + if fw.closed { + return fmt.Errorf("already closed") + } + + if err := fw.bw.Flush(); err != nil { + return err + } + + if err := fw.file.Sync(); err != nil { + return err + } + + if err := fw.file.Close(); err != nil { + return err + } + fw.closed = true + return nil +} + +func (fw *fileWriter) Cancel(_ context.Context) error { + if fw.closed { + return fmt.Errorf("already closed") + } + + fw.cancelled = true + fw.file.Close() + return os.Remove(fw.file.Name()) +} + +func (fw *fileWriter) Commit(_ context.Context) error { + if fw.closed { + return fmt.Errorf("already closed") + } else if fw.committed { + return fmt.Errorf("already committed") + } else if fw.cancelled { + return fmt.Errorf("already cancelled") + } + + if err := fw.bw.Flush(); err != nil { + return err + } + + if err := fw.file.Sync(); err != nil { + return err + } + + fw.committed = true + return nil +} diff --git a/registry/app/driver/s3-aws/s3.go b/registry/app/driver/s3-aws/s3.go new file mode 100644 index 000000000..ff2570758 --- /dev/null +++ b/registry/app/driver/s3-aws/s3.go @@ -0,0 +1,1832 @@ +// Source: https://github.com/distribution/distribution + +// Copyright 2014 https://github.com/distribution/distribution Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +// Package s3 provides a storagedriver.StorageDriver implementation to +// store blobs in Amazon S3 cloud storage. +// +// This package leverages the official aws client library for interfacing with +// S3. +// +// Because S3 is a key, value store the Stat call does not support last modification +// time for directories (directories are an abstraction for key, value stores) +// +// Keep in mind that S3 guarantees only read-after-write consistency for new +// objects, but no read-after-update or list-after-write consistency. 
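+//
+// Typical wiring goes through the driver factory (sketch; params is a
+// parameters map as accepted by FromParameters below):
+//
+//	import _ "github.com/harness/gitness/registry/app/driver/s3-aws"
+//	// ...
+//	d, err := factory.Create("s3aws", params)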
+package s3 + +import ( + "bytes" + "context" + "crypto/tls" + "errors" + "fmt" + "io" + "math" + "net/http" + "path/filepath" + "reflect" + "sort" + "strconv" + "strings" + "sync" + "time" + + "github.com/harness/gitness/registry/app/dist_temp/dcontext" + storagedriver "github.com/harness/gitness/registry/app/driver" + "github.com/harness/gitness/registry/app/driver/base" + "github.com/harness/gitness/registry/app/driver/factory" + + "github.com/aws/aws-sdk-go/aws" + "github.com/aws/aws-sdk-go/aws/awserr" + "github.com/aws/aws-sdk-go/aws/credentials" + "github.com/aws/aws-sdk-go/aws/endpoints" + "github.com/aws/aws-sdk-go/aws/request" + "github.com/aws/aws-sdk-go/aws/session" + "github.com/aws/aws-sdk-go/service/s3" + "github.com/rs/zerolog/log" +) + +const driverName = "s3aws" + +// minChunkSize defines the minimum multipart upload chunk size +// S3 API requires multipart upload chunks to be at least 5MB. +const minChunkSize = 5 * 1024 * 1024 + +// maxChunkSize defines the maximum multipart upload chunk size allowed by S3. +// S3 API requires max upload chunk to be 5GB. +const maxChunkSize = 5 * 1024 * 1024 * 1024 + +const defaultChunkSize = 2 * minChunkSize + +const ( + // defaultMultipartCopyChunkSize defines the default chunk size for all + // but the last Upload Part - Copy operation of a multipart copy. + // Empirically, 32 MB is optimal. + defaultMultipartCopyChunkSize = 32 * 1024 * 1024 + + // defaultMultipartCopyMaxConcurrency defines the default maximum number + // of concurrent Upload Part - Copy operations for a multipart copy. + defaultMultipartCopyMaxConcurrency = 100 + + // defaultMultipartCopyThresholdSize defines the default object size + // above which multipart copy will be used. (PUT Object - Copy is used + // for objects at or below this size.) Empirically, 32 MB is optimal. + defaultMultipartCopyThresholdSize = 32 * 1024 * 1024 +) + +// listMax is the largest amount of objects you can request from S3 in a list call. +const listMax = 1000 + +// noStorageClass defines the value to be used if storage class is not supported by the S3 endpoint. +const noStorageClass = "NONE" + +// s3StorageClasses lists all compatible (instant retrieval) S3 storage classes. +var s3StorageClasses = []string{ + noStorageClass, + s3.StorageClassStandard, + s3.StorageClassReducedRedundancy, + s3.StorageClassStandardIa, + s3.StorageClassOnezoneIa, + s3.StorageClassIntelligentTiering, + s3.StorageClassOutposts, + s3.StorageClassGlacierIr, +} + +// validRegions maps known s3 region identifiers to region descriptors. +var validRegions = map[string]struct{}{} + +// validObjectACLs contains known s3 object Acls. +var validObjectACLs = map[string]struct{}{} + +// DriverParameters A struct that encapsulates all of the driver parameters after all values have been set. 
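+//
+// FromParameters fills it from a map such as (values illustrative):
+//
+//	map[string]interface{}{
+//		"accesskey": "AKIA...",
+//		"secretkey": "...",
+//		"region":    "us-east-1",
+//		"bucket":    "my-registry-bucket",
+//	}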
+type DriverParameters struct {
+	AccessKey                   string
+	SecretKey                   string
+	Bucket                      string
+	Region                      string
+	RegionEndpoint              string
+	ForcePathStyle              bool
+	Encrypt                     bool
+	KeyID                       string
+	Secure                      bool
+	SkipVerify                  bool
+	V4Auth                      bool
+	ChunkSize                   int64
+	MultipartCopyChunkSize      int64
+	MultipartCopyMaxConcurrency int64
+	MultipartCopyThresholdSize  int64
+	RootDirectory               string
+	StorageClass                string
+	UserAgent                   string
+	ObjectACL                   string
+	SessionToken                string
+	UseDualStack                bool
+	Accelerate                  bool
+	LogLevel                    aws.LogLevelType
+}
+
+func GetDriverName() string {
+	return driverName
+}
+
+func init() {
+	partitions := endpoints.DefaultPartitions()
+	for _, p := range partitions {
+		for region := range p.Regions() {
+			validRegions[region] = struct{}{}
+		}
+	}
+
+	for _, objectACL := range []string{
+		s3.ObjectCannedACLPrivate,
+		s3.ObjectCannedACLPublicRead,
+		s3.ObjectCannedACLPublicReadWrite,
+		s3.ObjectCannedACLAuthenticatedRead,
+		s3.ObjectCannedACLAwsExecRead,
+		s3.ObjectCannedACLBucketOwnerRead,
+		s3.ObjectCannedACLBucketOwnerFullControl,
+	} {
+		validObjectACLs[objectACL] = struct{}{}
+	}
+
+	// Register this driver under the s3aws name
+	factory.Register(driverName, &s3DriverFactory{})
+}
+
+// TODO: figure-out why init is not called automatically
+func Register() {
+	log.Info().Msgf("registering s3 driver")
+}
+
+// s3DriverFactory implements the factory.StorageDriverFactory interface.
+type s3DriverFactory struct{}
+
+func (factory *s3DriverFactory) Create(parameters map[string]interface{}) (storagedriver.StorageDriver, error) {
+	return FromParameters(parameters)
+}
+
+var _ storagedriver.StorageDriver = &driver{}
+
+type driver struct {
+	S3                          *s3.S3
+	Bucket                      string
+	ChunkSize                   int64
+	Encrypt                     bool
+	KeyID                       string
+	MultipartCopyChunkSize      int64
+	MultipartCopyMaxConcurrency int64
+	MultipartCopyThresholdSize  int64
+	RootDirectory               string
+	StorageClass                string
+	ObjectACL                   string
+	pool                        *sync.Pool
+}
+
+type baseEmbed struct {
+	base.Base
+}
+
+// Driver is a storagedriver.StorageDriver implementation backed by Amazon S3
+// Objects are stored at absolute keys in the provided bucket.
+type Driver struct {
+	baseEmbed
+}
+
+// FromParameters constructs a new Driver with a given parameters map
+// Required parameters:
+// - accesskey
+// - secretkey
+// - region
+// - bucket
+// - encrypt.
+//
+//nolint:gocognit
+func FromParameters(parameters map[string]interface{}) (*Driver, error) {
+	// Providing no values for these is valid in case the user is authenticating
+	// with an IAM on an ec2 instance (in which case the instance credentials will
+	// be summoned when GetAuth is called).
+ accessKey := parameters["accesskey"] + if accessKey == nil { + accessKey = "" + } + secretKey := parameters["secretkey"] + if secretKey == nil { + secretKey = "" + } + + regionEndpoint := parameters["regionendpoint"] + if regionEndpoint == nil { + regionEndpoint = "" + } + + forcePathStyleBool := true + forcePathStyle := parameters["forcepathstyle"] + switch forcePathStyle := forcePathStyle.(type) { + case string: + b, err := strconv.ParseBool(forcePathStyle) + if err != nil { + return nil, fmt.Errorf("the forcePathStyle parameter should be a boolean") + } + forcePathStyleBool = b + case bool: + forcePathStyleBool = forcePathStyle + case nil: + // do nothing + default: + return nil, fmt.Errorf("the forcePathStyle parameter should be a boolean") + } + + regionName := parameters["region"] + if regionName == nil || fmt.Sprint(regionName) == "" { + return nil, fmt.Errorf("no region parameter provided") + } + region := fmt.Sprint(regionName) + // Don't check the region value if a custom endpoint is provided. + if regionEndpoint == "" { + if _, ok := validRegions[region]; !ok { + return nil, fmt.Errorf("invalid region provided: %v", region) + } + } + + bucket := parameters["bucket"] + if bucket == nil || fmt.Sprint(bucket) == "" { + return nil, fmt.Errorf("no bucket parameter provided") + } + + encryptBool := false + encrypt := parameters["encrypt"] + switch encrypt := encrypt.(type) { + case string: + b, err := strconv.ParseBool(encrypt) + if err != nil { + return nil, fmt.Errorf("the encrypt parameter should be a boolean") + } + encryptBool = b + case bool: + encryptBool = encrypt + case nil: + // do nothing + default: + return nil, fmt.Errorf("the encrypt parameter should be a boolean") + } + + secureBool := true + secure := parameters["secure"] + switch secure := secure.(type) { + case string: + b, err := strconv.ParseBool(secure) + if err != nil { + return nil, fmt.Errorf("the secure parameter should be a boolean") + } + secureBool = b + case bool: + secureBool = secure + case nil: + // do nothing + default: + return nil, fmt.Errorf("the secure parameter should be a boolean") + } + + skipVerifyBool := false + skipVerify := parameters["skipverify"] + switch skipVerify := skipVerify.(type) { + case string: + b, err := strconv.ParseBool(skipVerify) + if err != nil { + return nil, fmt.Errorf("the skipVerify parameter should be a boolean") + } + skipVerifyBool = b + case bool: + skipVerifyBool = skipVerify + case nil: + // do nothing + default: + return nil, fmt.Errorf("the skipVerify parameter should be a boolean") + } + + v4Bool := true + v4auth := parameters["v4auth"] + switch v4auth := v4auth.(type) { + case string: + b, err := strconv.ParseBool(v4auth) + if err != nil { + return nil, fmt.Errorf("the v4auth parameter should be a boolean") + } + v4Bool = b + case bool: + v4Bool = v4auth + case nil: + // do nothing + default: + return nil, fmt.Errorf("the v4auth parameter should be a boolean") + } + + keyID := parameters["keyid"] + if keyID == nil { + keyID = "" + } + + chunkSize, err := getParameterAsInt64( + parameters, "chunksize", + defaultChunkSize, minChunkSize, maxChunkSize, + ) + if err != nil { + return nil, err + } + + multipartCopyChunkSize, err := getParameterAsInt64( + parameters, + "multipartcopychunksize", + defaultMultipartCopyChunkSize, + minChunkSize, + maxChunkSize, + ) + if err != nil { + return nil, err + } + + multipartCopyMaxConcurrency, err := getParameterAsInt64( + parameters, + "multipartcopymaxconcurrency", + defaultMultipartCopyMaxConcurrency, + 1, + 
math.MaxInt64, + ) + if err != nil { + return nil, err + } + + multipartCopyThresholdSize, err := getParameterAsInt64( + parameters, + "multipartcopythresholdsize", + defaultMultipartCopyThresholdSize, + 0, + maxChunkSize, + ) + if err != nil { + return nil, err + } + + rootDirectory := parameters["rootdirectory"] + if rootDirectory == nil { + rootDirectory = "" + } + + storageClass := s3.StorageClassStandard + storageClassParam := parameters["storageclass"] + if storageClassParam != nil { + storageClassString, ok := storageClassParam.(string) + if !ok { + return nil, fmt.Errorf( + "the storageclass parameter must be one of %v, %v invalid", + s3StorageClasses, + storageClassParam, + ) + } + // All valid storage class parameters are UPPERCASE, so be a bit more flexible here + storageClassString = strings.ToUpper(storageClassString) + if storageClassString != noStorageClass && + storageClassString != s3.StorageClassStandard && + storageClassString != s3.StorageClassReducedRedundancy && + storageClassString != s3.StorageClassStandardIa && + storageClassString != s3.StorageClassOnezoneIa && + storageClassString != s3.StorageClassIntelligentTiering && + storageClassString != s3.StorageClassOutposts && + storageClassString != s3.StorageClassGlacierIr { + return nil, fmt.Errorf( + "the storageclass parameter must be one of %v, %v invalid", + s3StorageClasses, + storageClassParam, + ) + } + storageClass = storageClassString + } + + userAgent := parameters["useragent"] + if userAgent == nil { + userAgent = "" + } + + objectACL := s3.ObjectCannedACLPrivate + objectACLParam := parameters["objectacl"] + if objectACLParam != nil { + objectACLString, ok := objectACLParam.(string) + if !ok { + return nil, fmt.Errorf( + "invalid value for objectacl parameter: %v", + objectACLParam, + ) + } + + if _, ok = validObjectACLs[objectACLString]; !ok { + return nil, fmt.Errorf( + "invalid value for objectacl parameter: %v", + objectACLParam, + ) + } + objectACL = objectACLString + } + + useDualStackBool := false + useDualStack := parameters["usedualstack"] + switch useDualStack := useDualStack.(type) { + case string: + b, err := strconv.ParseBool(useDualStack) + if err != nil { + return nil, fmt.Errorf("the useDualStack parameter should be a boolean") + } + useDualStackBool = b + case bool: + useDualStackBool = useDualStack + case nil: + // do nothing + default: + return nil, fmt.Errorf("the useDualStack parameter should be a boolean") + } + + sessionToken := "" + + accelerateBool := false + accelerate := parameters["accelerate"] + switch accelerate := accelerate.(type) { + case string: + b, err := strconv.ParseBool(accelerate) + if err != nil { + return nil, fmt.Errorf("the accelerate parameter should be a boolean") + } + accelerateBool = b + case bool: + accelerateBool = accelerate + case nil: + // do nothing + default: + return nil, fmt.Errorf("the accelerate parameter should be a boolean") + } + + params := DriverParameters{ + fmt.Sprint(accessKey), + fmt.Sprint(secretKey), + fmt.Sprint(bucket), + region, + fmt.Sprint(regionEndpoint), + forcePathStyleBool, + encryptBool, + fmt.Sprint(keyID), + secureBool, + skipVerifyBool, + v4Bool, + chunkSize, + multipartCopyChunkSize, + multipartCopyMaxConcurrency, + multipartCopyThresholdSize, + fmt.Sprint(rootDirectory), + storageClass, + fmt.Sprint(userAgent), + objectACL, + fmt.Sprint(sessionToken), + useDualStackBool, + accelerateBool, + getS3LogLevelFromParam(parameters["loglevel"]), + } + + return New(params) +} + +func getS3LogLevelFromParam(param interface{}) 
aws.LogLevelType { + if param == nil { + return aws.LogOff + } + logLevelParam, ok := param.(string) + if !ok { + log.Warn().Msg("Error: param is not of type string") + } + var logLevel aws.LogLevelType + switch strings.ToLower(logLevelParam) { + case "off": + logLevel = aws.LogOff + case "debug": + logLevel = aws.LogDebug + case "debugwithsigning": + logLevel = aws.LogDebugWithSigning + case "debugwithhttpbody": + logLevel = aws.LogDebugWithHTTPBody + case "debugwithrequestretries": + logLevel = aws.LogDebugWithRequestRetries + case "debugwithrequesterrors": + logLevel = aws.LogDebugWithRequestErrors + case "debugwitheventstreambody": + logLevel = aws.LogDebugWithEventStreamBody + default: + logLevel = aws.LogOff + } + return logLevel +} + +// getParameterAsInt64 converts parameters[name] to an int64 value (using +// defaultt if nil), verifies it is no smaller than min, and returns it. +func getParameterAsInt64( + parameters map[string]interface{}, + name string, + defaultt int64, + min int64, + max int64, +) (int64, error) { + rv := defaultt + param := parameters[name] + switch v := param.(type) { + case string: + vv, err := strconv.ParseInt(v, 0, 64) + if err != nil { + return 0, fmt.Errorf("%s parameter must be an integer, %v invalid", name, param) + } + rv = vv + case int64: + rv = v + case int, uint, int32, uint32, uint64: + rv = reflect.ValueOf(v).Convert(reflect.TypeOf(rv)).Int() + case nil: + // do nothing + default: + return 0, fmt.Errorf("invalid value for %s: %#v", name, param) + } + + if rv < min || rv > max { + return 0, fmt.Errorf( + "the %s %#v parameter should be a number between %d and %d (inclusive)", + name, + rv, + min, + max, + ) + } + + return rv, nil +} + +// New constructs a new Driver with the given AWS credentials, region, encryption flag, and +// bucketName. 
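+//
+// Direct construction is possible but most callers go through
+// FromParameters; a minimal sketch (field values illustrative):
+//
+//	d, err := New(DriverParameters{
+//		Region:    "us-east-1",
+//		Bucket:    "my-registry-bucket",
+//		V4Auth:    true,
+//		ChunkSize: minChunkSize,
+//	})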
+func New(params DriverParameters) (*Driver, error) { + if !params.V4Auth && + (params.RegionEndpoint == "" || + strings.Contains(params.RegionEndpoint, "s3.amazonaws.com")) { + return nil, fmt.Errorf("on Amazon S3 this storage driver can only be used with v4 authentication") + } + + awsConfig := aws.NewConfig().WithLogLevel(params.LogLevel) + + if params.AccessKey != "" && params.SecretKey != "" { + creds := credentials.NewStaticCredentials( + params.AccessKey, + params.SecretKey, + params.SessionToken, + ) + awsConfig.WithCredentials(creds) + } + + if params.RegionEndpoint != "" { + awsConfig.WithEndpoint(params.RegionEndpoint) + awsConfig.WithS3ForcePathStyle(params.ForcePathStyle) + } + + awsConfig.WithS3UseAccelerate(params.Accelerate) + awsConfig.WithRegion(params.Region) + awsConfig.WithDisableSSL(!params.Secure) + if params.UseDualStack { + awsConfig.UseDualStackEndpoint = endpoints.DualStackEndpointStateEnabled + } + + if params.SkipVerify { + httpTransport := http.DefaultTransport.(*http.Transport).Clone() + httpTransport.TLSClientConfig = &tls.Config{InsecureSkipVerify: true, MinVersion: tls.VersionTLS12} + awsConfig.WithHTTPClient( + &http.Client{ + Transport: httpTransport, + }, + ) + } + + sess, err := session.NewSession(awsConfig) + if err != nil { + return nil, fmt.Errorf("failed to create new session with aws config: %w", err) + } + + if params.UserAgent != "" { + sess.Handlers.Build.PushBack(request.MakeAddToUserAgentFreeFormHandler(params.UserAgent)) + } + + s3obj := s3.New(sess) + + // enable S3 compatible signature v2 signing instead + if !params.V4Auth { + setv2Handlers(s3obj) + } + + d := &driver{ + S3: s3obj, + Bucket: params.Bucket, + ChunkSize: params.ChunkSize, + Encrypt: params.Encrypt, + KeyID: params.KeyID, + MultipartCopyChunkSize: params.MultipartCopyChunkSize, + MultipartCopyMaxConcurrency: params.MultipartCopyMaxConcurrency, + MultipartCopyThresholdSize: params.MultipartCopyThresholdSize, + RootDirectory: params.RootDirectory, + StorageClass: params.StorageClass, + ObjectACL: params.ObjectACL, + pool: &sync.Pool{ + New: func() interface{} { + return &buffer{ + data: make([]byte, 0, params.ChunkSize), + } + }, + }, + } + + return &Driver{ + baseEmbed: baseEmbed{ + Base: base.Base{ + StorageDriver: d, + }, + }, + }, nil +} + +// Implement the storagedriver.StorageDriver interface + +func (d *driver) Name() string { + return driverName +} + +// GetContent retrieves the content stored at "path" as a []byte. +func (d *driver) GetContent(ctx context.Context, path string) ([]byte, error) { + reader, err := d.Reader(ctx, path, 0) + if err != nil { + return nil, err + } + return io.ReadAll(reader) +} + +// PutContent stores the []byte content at a location designated by "path". +func (d *driver) PutContent(ctx context.Context, path string, contents []byte) error { + log.Ctx(ctx).Debug().Msgf("[AWS] PutContent: %s", path) + _, err := d.S3.PutObjectWithContext( + ctx, &s3.PutObjectInput{ + Bucket: aws.String(d.Bucket), + Key: aws.String(d.s3Path(path)), + ContentType: d.getContentType(), + ACL: d.getACL(), + ServerSideEncryption: d.getEncryptionMode(), + SSEKMSKeyId: d.getSSEKMSKeyID(), + StorageClass: d.getStorageClass(), + Body: bytes.NewReader(contents), + }, + ) + return parseError(path, err) +} + +// Reader retrieves an io.ReadCloser for the content stored at "path" with a +// given byte offset. 
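+//
+// For example, resuming a read from byte offset 1024 (sketch):
+//
+//	rc, err := d.Reader(ctx, "/v2/blobs/data", 1024)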
+func (d *driver) Reader(ctx context.Context, path string, offset int64) (io.ReadCloser, error) {
+	log.Ctx(ctx).Debug().Msgf("[AWS] GetObject: %s", path)
+	resp, err := d.S3.GetObjectWithContext(
+		ctx, &s3.GetObjectInput{
+			Bucket: aws.String(d.Bucket),
+			Key:    aws.String(d.s3Path(path)),
+			Range:  aws.String("bytes=" + strconv.FormatInt(offset, 10) + "-"),
+		},
+	)
+	if err != nil {
+		var s3Err awserr.Error
+		if ok := errors.As(err, &s3Err); ok && s3Err.Code() == "InvalidRange" {
+			return io.NopCloser(bytes.NewReader(nil)), nil
+		}
+
+		return nil, parseError(path, err)
+	}
+	return resp.Body, nil
+}
+
+// Writer returns a FileWriter which will store the content written to it
+// at the location designated by "path" after the call to Commit.
+// Appending is only supported for paths whose committed content has zero
+// size, in which case the existing (empty) content is overwritten by the
+// new content. It returns storagedriver.Error when asked to append to a
+// path with non-zero committed content.
+func (d *driver) Writer(ctx context.Context, path string, appendMode bool) (storagedriver.FileWriter, error) {
+	key := d.s3Path(path)
+	if !appendMode {
+		log.Ctx(ctx).Debug().Msgf("[AWS] CreateMultipartUpload: %s", path)
+		resp, err := d.S3.CreateMultipartUploadWithContext(
+			ctx, &s3.CreateMultipartUploadInput{
+				Bucket:               aws.String(d.Bucket),
+				Key:                  aws.String(key),
+				ContentType:          d.getContentType(),
+				ACL:                  d.getACL(),
+				ServerSideEncryption: d.getEncryptionMode(),
+				SSEKMSKeyId:          d.getSSEKMSKeyID(),
+				StorageClass:         d.getStorageClass(),
+			},
+		)
+		if err != nil {
+			return nil, err
+		}
+		return d.newWriter(ctx, key, *resp.UploadId, nil), nil
+	}
+
+	listMultipartUploadsInput := &s3.ListMultipartUploadsInput{
+		Bucket: aws.String(d.Bucket),
+		Prefix: aws.String(key),
+	}
+	for {
+		log.Ctx(ctx).Debug().Msgf("[AWS] ListMultipartUploads: %s", path)
+		resp, err := d.S3.ListMultipartUploadsWithContext(ctx, listMultipartUploadsInput)
+		if err != nil {
+			return nil, parseError(path, err)
+		}
+
+		// resp.Uploads can only be empty on the first call
+		// if there were no more results to return after the first call, resp.IsTruncated would have been false
+		// and the loop would be exited without recalling ListMultipartUploads
+		if len(resp.Uploads) == 0 {
+			fi, err := d.Stat(ctx, path)
+			if err != nil {
+				return nil, parseError(path, err)
+			}
+
+			if fi.Size() == 0 {
+				log.Ctx(ctx).Debug().Msgf("[AWS] CreateMultipartUpload: %s", path)
+				resp, err := d.S3.CreateMultipartUploadWithContext(
+					ctx, &s3.CreateMultipartUploadInput{
+						Bucket:               aws.String(d.Bucket),
+						Key:                  aws.String(key),
+						ContentType:          d.getContentType(),
+						ACL:                  d.getACL(),
+						ServerSideEncryption: d.getEncryptionMode(),
+						SSEKMSKeyId:          d.getSSEKMSKeyID(),
+						StorageClass:         d.getStorageClass(),
+					},
+				)
+				if err != nil {
+					return nil, err
+				}
+				return d.newWriter(ctx, key, *resp.UploadId, nil), nil
+			}
+			return nil, storagedriver.Error{
+				DriverName: driverName,
+				Detail:     fmt.Errorf("append to non-zero-size path %s unsupported", path),
+			}
+		}
+
+		var allParts []*s3.Part
+		for _, multi := range resp.Uploads {
+			if key != *multi.Key {
+				continue
+			}
+
+			log.Debug().Msgf("[AWS] ListParts: %s", path)
+			partsList, err := d.S3.ListPartsWithContext(
+				ctx, &s3.ListPartsInput{
+					Bucket:   aws.String(d.Bucket),
+					Key:      aws.String(key),
+					UploadId: multi.UploadId,
+				},
+			)
+			if err != nil {
+				return nil, parseError(path, err)
+			}
+			allParts = append(allParts, partsList.Parts...)
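+			// ListParts pages its results; follow NextPartNumberMarker
+			// until IsTruncated is false so the resumed writer sees every
+			// previously uploaded part.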
+ for *partsList.IsTruncated { + log.Debug().Msgf("[AWS] ListParts: %s", path) + partsList, err = d.S3.ListPartsWithContext( + ctx, &s3.ListPartsInput{ + Bucket: aws.String(d.Bucket), + Key: aws.String(key), + UploadId: multi.UploadId, + PartNumberMarker: partsList.NextPartNumberMarker, + }, + ) + if err != nil { + return nil, parseError(path, err) + } + allParts = append(allParts, partsList.Parts...) + } + return d.newWriter(ctx, key, *multi.UploadId, allParts), nil + } + + // resp.NextUploadIdMarker must have at least one element or we would have returned not found + listMultipartUploadsInput.UploadIdMarker = resp.NextUploadIdMarker + + // from the s3 api docs, IsTruncated "specifies whether (true) or not (false) all of the results were returned" + // if everything has been returned, break + if resp.IsTruncated == nil || !*resp.IsTruncated { + break + } + } + return nil, storagedriver.PathNotFoundError{Path: path} +} + +// Stat retrieves the FileInfo for the given path, including the current size +// in bytes and the creation time. +func (d *driver) Stat(ctx context.Context, path string) (storagedriver.FileInfo, error) { + log.Debug().Msgf("[AWS] ListObjectsV2: %s", path) + resp, err := d.S3.ListObjectsV2WithContext( + ctx, &s3.ListObjectsV2Input{ + Bucket: aws.String(d.Bucket), + Prefix: aws.String(d.s3Path(path)), + MaxKeys: aws.Int64(1), + }, + ) + if err != nil { + return nil, err + } + + fi := storagedriver.FileInfoFields{ + Path: path, + } + + switch { + case len(resp.Contents) == 1: + if *resp.Contents[0].Key != d.s3Path(path) { + fi.IsDir = true + } else { + fi.IsDir = false + fi.Size = *resp.Contents[0].Size + fi.ModTime = *resp.Contents[0].LastModified + } + case len(resp.CommonPrefixes) == 1: + fi.IsDir = true + default: + return nil, storagedriver.PathNotFoundError{Path: path} + } + + return storagedriver.FileInfoInternal{FileInfoFields: fi}, nil +} + +// List returns a list of the objects that are direct descendants of the given path. +func (d *driver) List(ctx context.Context, opath string) ([]string, error) { + path := opath + if path != "/" && path[len(path)-1] != '/' { + path += "/" + } + + // This is to cover for the cases when the rootDirectory of the driver is either "" or "/". 
+ // In those cases, there is no root prefix to replace and we must actually add a "/" to all + // results in order to keep them as valid paths as recognized by storagedriver.PathRegexp + prefix := "" + if d.s3Path("") == "" { + prefix = "/" + } + + log.Debug().Msgf("[AWS] ListObjectsV2: %s", path) + resp, err := d.S3.ListObjectsV2WithContext( + ctx, &s3.ListObjectsV2Input{ + Bucket: aws.String(d.Bucket), + Prefix: aws.String(d.s3Path(path)), + Delimiter: aws.String("/"), + MaxKeys: aws.Int64(listMax), + }, + ) + if err != nil { + return nil, parseError(opath, err) + } + + files := []string{} + directories := []string{} + + for { + for _, key := range resp.Contents { + files = append(files, strings.Replace(*key.Key, d.s3Path(""), prefix, 1)) + } + + for _, commonPrefix := range resp.CommonPrefixes { + commonPrefix := *commonPrefix.Prefix + directories = append( + directories, + strings.Replace(commonPrefix[0:len(commonPrefix)-1], d.s3Path(""), prefix, 1), + ) + } + + if *resp.IsTruncated { + log.Debug().Msgf("[AWS] ListObjectsV2: %s", path) + resp, err = d.S3.ListObjectsV2WithContext( + ctx, &s3.ListObjectsV2Input{ + Bucket: aws.String(d.Bucket), + Prefix: aws.String(d.s3Path(path)), + Delimiter: aws.String("/"), + MaxKeys: aws.Int64(listMax), + ContinuationToken: resp.NextContinuationToken, + }, + ) + if err != nil { + return nil, err + } + } else { + break + } + } + + if opath != "/" { + if len(files) == 0 && len(directories) == 0 { + // Treat empty response as missing directory, since we don't actually + // have directories in s3. + return nil, storagedriver.PathNotFoundError{Path: opath} + } + } + + return append(files, directories...), nil +} + +// Move moves an object stored at sourcePath to destPath, removing the original +// object. +func (d *driver) Move(ctx context.Context, sourcePath string, destPath string) error { + /* This is terrible, but aws doesn't have an actual move. */ + if err := d.copy(ctx, sourcePath, destPath); err != nil { + return err + } + return d.Delete(ctx, sourcePath) +} + +// copy copies an object stored at sourcePath to destPath. +func (d *driver) copy(ctx context.Context, sourcePath string, destPath string) error { + // S3 can copy objects up to 5 GB in size with a single PUT Object - Copy + // operation. For larger objects, the multipart upload API must be used. + // + // Empirically, multipart copy is fastest with 32 MB parts and is faster + // than PUT Object - Copy for objects larger than 32 MB. 
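+ // The multipart path below sizes parts with a ceiling division,
+ // numParts = (size + chunkSize - 1) / chunkSize. For example, copying
+ // a 100 MB object with 32 MB chunks yields (100+32-1)/32 = 4 parts,
+ // the last part carrying the remaining 4 MB.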
+ + fileInfo, err := d.Stat(ctx, sourcePath) + if err != nil { + return parseError(sourcePath, err) + } + + if fileInfo.Size() <= d.MultipartCopyThresholdSize { + log.Debug().Msgf("[AWS] CopyObject: %s -> %s", sourcePath, destPath) + _, err := d.S3.CopyObjectWithContext( + ctx, &s3.CopyObjectInput{ + Bucket: aws.String(d.Bucket), + Key: aws.String(d.s3Path(destPath)), + ContentType: d.getContentType(), + ACL: d.getACL(), + ServerSideEncryption: d.getEncryptionMode(), + SSEKMSKeyId: d.getSSEKMSKeyID(), + StorageClass: d.getStorageClass(), + CopySource: aws.String(d.Bucket + "/" + d.s3Path(sourcePath)), + }, + ) + if err != nil { + return parseError(sourcePath, err) + } + return nil + } + + log.Debug().Msgf("[AWS] CreateMultipartUpload: %s", destPath) + createResp, err := d.S3.CreateMultipartUploadWithContext( + ctx, &s3.CreateMultipartUploadInput{ + Bucket: aws.String(d.Bucket), + Key: aws.String(d.s3Path(destPath)), + ContentType: d.getContentType(), + ACL: d.getACL(), + SSEKMSKeyId: d.getSSEKMSKeyID(), + ServerSideEncryption: d.getEncryptionMode(), + StorageClass: d.getStorageClass(), + }, + ) + if err != nil { + return err + } + + numParts := (fileInfo.Size() + d.MultipartCopyChunkSize - 1) / d.MultipartCopyChunkSize + completedParts := make([]*s3.CompletedPart, numParts) + errChan := make(chan error, numParts) + limiter := make(chan struct{}, d.MultipartCopyMaxConcurrency) + + for i := range completedParts { + i := int64(i) + go func() { + limiter <- struct{}{} + firstByte := i * d.MultipartCopyChunkSize + lastByte := firstByte + d.MultipartCopyChunkSize - 1 + if lastByte >= fileInfo.Size() { + lastByte = fileInfo.Size() - 1 + } + log.Debug().Msgf("[AWS] [%d] UploadPartCopy: %s -> %s", i, sourcePath, destPath) + uploadResp, err := d.S3.UploadPartCopyWithContext( + ctx, &s3.UploadPartCopyInput{ + Bucket: aws.String(d.Bucket), + CopySource: aws.String(d.Bucket + "/" + d.s3Path(sourcePath)), + Key: aws.String(d.s3Path(destPath)), + PartNumber: aws.Int64(i + 1), + UploadId: createResp.UploadId, + CopySourceRange: aws.String(fmt.Sprintf("bytes=%d-%d", firstByte, lastByte)), + }, + ) + if err == nil { + completedParts[i] = &s3.CompletedPart{ + ETag: uploadResp.CopyPartResult.ETag, + PartNumber: aws.Int64(i + 1), + } + } + errChan <- err + <-limiter + }() + } + + for range completedParts { + err := <-errChan + if err != nil { + return err + } + } + + log.Debug().Msgf("[AWS] CompleteMultipartUpload: %s", destPath) + _, err = d.S3.CompleteMultipartUploadWithContext( + ctx, &s3.CompleteMultipartUploadInput{ + Bucket: aws.String(d.Bucket), + Key: aws.String(d.s3Path(destPath)), + UploadId: createResp.UploadId, + MultipartUpload: &s3.CompletedMultipartUpload{Parts: completedParts}, + }, + ) + return err +} + +// Delete recursively deletes all objects stored at "path" and its subpaths. +// We must be careful since S3 does not guarantee read after delete consistency. 
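+// Keys are gathered from paginated ListObjectsV2 calls and removed in
+// DeleteObjects batches of at most 1,000 keys, the maximum a single
+// DeleteObjects request accepts.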
+func (d *driver) Delete(ctx context.Context, path string) error {
+ s3Objects := make([]*s3.ObjectIdentifier, 0, listMax)
+ s3Path := d.s3Path(path)
+ listObjectsInput := &s3.ListObjectsV2Input{
+ Bucket: aws.String(d.Bucket),
+ Prefix: aws.String(s3Path),
+ }
+
+ for {
+ // list all the objects
+ log.Debug().Msgf("[AWS] List all the objects: %s", path)
+ resp, err := d.S3.ListObjectsV2WithContext(ctx, listObjectsInput)
+
+ // resp.Contents can only be empty on the first call
+ // if there were no more results to return after the first call, resp.IsTruncated would have been false
+ // and the loop would exit without recalling ListObjects
+ if err != nil || len(resp.Contents) == 0 {
+ return storagedriver.PathNotFoundError{Path: path}
+ }
+
+ for _, key := range resp.Contents {
+ // Skip if we encounter a key that is not a subpath (so that deleting "/a" does not delete "/ab").
+ if len(*key.Key) > len(s3Path) && (*key.Key)[len(s3Path)] != '/' {
+ continue
+ }
+ s3Objects = append(
+ s3Objects, &s3.ObjectIdentifier{
+ Key: key.Key,
+ },
+ )
+ }
+
+ // Delete objects only if the list is not empty, otherwise S3 API returns a cryptic error
+ if len(s3Objects) > 0 {
+ // NOTE: according to AWS docs
+ // https://docs.aws.amazon.com/AmazonS3/latest/API/API_ListObjectsV2.html
+ // by default the response returns up to 1,000 key names. The response _might_
+ // contain fewer keys but it will never contain more.
+ // 1,000 keys is coincidentally (?) also the max number of keys that can be
+ // deleted in a single Delete operation, so we'll just smack
+ // Delete here straight away and reset the object slice when successful.
+ log.Debug().Msgf("[AWS] DeleteObjects: %s", path)
+ resp, err := d.S3.DeleteObjectsWithContext(
+ ctx, &s3.DeleteObjectsInput{
+ Bucket: aws.String(d.Bucket),
+ Delete: &s3.Delete{
+ Objects: s3Objects,
+ Quiet: aws.Bool(false),
+ },
+ },
+ )
+ if err != nil {
+ return err
+ }
+
+ if len(resp.Errors) > 0 {
+ // NOTE: AWS SDK s3.Error does not implement error interface which
+ // is pretty intensely sad, so we have to do away with this for now.
+ errs := make([]error, 0, len(resp.Errors))
+ for _, err := range resp.Errors {
+ errs = append(errs, errors.New(err.String()))
+ }
+ return storagedriver.StorageDriverError{
+ DriverName: driverName,
+ Errs: errs,
+ }
+ }
+ }
+ // NOTE: we don't want to reallocate
+ // the slice so we simply "reset" it
+ s3Objects = s3Objects[:0]
+
+ // resp.Contents must have at least one element or we would have returned not found
+ listObjectsInput.StartAfter = resp.Contents[len(resp.Contents)-1].Key
+
+ // from the s3 api docs, IsTruncated "specifies whether (true) or not (false) all of the results were returned"
+ // if everything has been returned, break
+ if resp.IsTruncated == nil || !*resp.IsTruncated {
+ break
+ }
+ }
+
+ return nil
+}
+
+// RedirectURL returns a URL which may be used to retrieve the content stored at the given path.
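+// URLs are presigned with a fixed 20 minute expiry. Only GET and HEAD are
+// supported; any other method returns an empty URL. A usage sketch (path
+// hypothetical):
+//
+//	u, err := d.RedirectURL(ctx, http.MethodGet, "/some/blob")
+//	// u is a time-limited S3 URL suitable for an HTTP redirect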
+func (d *driver) RedirectURL(_ context.Context, method string, path string) (string, error) { + expiresIn := 20 * time.Minute + + var req *request.Request + + switch method { + case http.MethodGet: + req, _ = d.S3.GetObjectRequest( + &s3.GetObjectInput{ + Bucket: aws.String(d.Bucket), + Key: aws.String(d.s3Path(path)), + }, + ) + case http.MethodHead: + req, _ = d.S3.HeadObjectRequest( + &s3.HeadObjectInput{ + Bucket: aws.String(d.Bucket), + Key: aws.String(d.s3Path(path)), + }, + ) + default: + return "", nil + } + + log.Debug().Msgf("[AWS] Generating presigned URL for %s %s", method, path) + return req.Presign(expiresIn) +} + +// Walk traverses a filesystem defined within driver, starting +// from the given path, calling f on each file. +func (d *driver) Walk( + ctx context.Context, + from string, + f storagedriver.WalkFn, + options ...func(*storagedriver.WalkOptions), +) error { + walkOptions := &storagedriver.WalkOptions{} + for _, o := range options { + o(walkOptions) + } + + var objectCount int64 + if err := d.doWalk(ctx, &objectCount, from, walkOptions.StartAfterHint, f); err != nil { + return err + } + + return nil +} + +func (d *driver) doWalk( + parentCtx context.Context, + objectCount *int64, + from string, + startAfter string, + f storagedriver.WalkFn, +) error { + var ( + retError error + // the most recent directory walked for de-duping + prevDir string + // the most recent skip directory to avoid walking over undesirable files + prevSkipDir string + ) + prevDir = from + + path := from + if !strings.HasSuffix(path, "/") { + path += "/" + } + + prefix := "" + if d.s3Path("") == "" { + prefix = "/" + } + + listObjectsInput := &s3.ListObjectsV2Input{ + Bucket: aws.String(d.Bucket), + Prefix: aws.String(d.s3Path(path)), + MaxKeys: aws.Int64(listMax), + StartAfter: aws.String(d.s3Path(startAfter)), + } + + ctx, done := dcontext.WithTrace(parentCtx) + defer done("s3aws.ListObjectsV2PagesWithContext(%s)", listObjectsInput) + + // When the "delimiter" argument is omitted, the S3 list + // API will list all objects in the bucket + // recursively, omitting directory paths. + // Objects are listed in sorted, depth-first order so we + // can infer all the directories by comparing each object + // path to the last one we saw. See: + // https://docs.aws.amazon.com/AmazonS3/latest/userguide/ListingKeysUsingAPIs.html + + // With files returned in sorted depth-first order, directories + // are inferred in the same order. + // ErrSkipDir is handled by explicitly skipping over any files + // under the skipped directory. This may be sub-optimal + // for extreme edge cases but for the general use case + // in a registry, this is orders of magnitude + // faster than a more explicit recursive implementation. 
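+ // For example, after emitting the file "/a/b/f1", a following key
+ // "/a/c/f2" implies the not-yet-seen directory "/a/c" (computed by
+ // directoryDiff below), which is emitted before the file itself.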
+ log.Debug().Msgf("[AWS] Listing objects in %s", path) + listObjectErr := d.S3.ListObjectsV2PagesWithContext( + ctx, + listObjectsInput, + func(objects *s3.ListObjectsV2Output, _ bool) bool { + walkInfos := make([]storagedriver.FileInfoInternal, 0, len(objects.Contents)) + + for _, file := range objects.Contents { + filePath := strings.Replace(*file.Key, d.s3Path(""), prefix, 1) + + // get a list of all inferred directories between the previous directory and this file + dirs := directoryDiff(prevDir, filePath) + if len(dirs) > 0 { + for _, dir := range dirs { + walkInfos = append( + walkInfos, storagedriver.FileInfoInternal{ + FileInfoFields: storagedriver.FileInfoFields{ + IsDir: true, + Path: dir, + }, + }, + ) + prevDir = dir + } + } + + walkInfos = append( + walkInfos, storagedriver.FileInfoInternal{ + FileInfoFields: storagedriver.FileInfoFields{ + IsDir: false, + Size: *file.Size, + ModTime: *file.LastModified, + Path: filePath, + }, + }, + ) + } + + for _, walkInfo := range walkInfos { + // skip any results under the last skip directory + if prevSkipDir != "" && strings.HasPrefix(walkInfo.Path(), prevSkipDir) { + continue + } + + err := f(walkInfo) + *objectCount++ + + if err != nil { + if errors.Is(err, storagedriver.ErrSkipDir) { + prevSkipDir = walkInfo.Path() + continue + } + if errors.Is(err, storagedriver.ErrFilledBuffer) { + return false + } + retError = err + return false + } + } + return true + }, + ) + + if retError != nil { + return retError + } + + if listObjectErr != nil { + return listObjectErr + } + + return nil +} + +// directoryDiff finds all directories that are not in common between +// the previous and current paths in sorted order. +// +// # Examples +// +// directoryDiff("/path/to/folder", "/path/to/folder/folder/file") +// // => [ "/path/to/folder/folder" ] +// +// directoryDiff("/path/to/folder/folder1", "/path/to/folder/folder2/file") +// // => [ "/path/to/folder/folder2" ] +// +// directoryDiff("/path/to/folder/folder1/file", "/path/to/folder/folder2/file") +// // => [ "/path/to/folder/folder2" ] +// +// directoryDiff("/path/to/folder/folder1/file", "/path/to/folder/folder2/folder1/file") +// // => [ "/path/to/folder/folder2", "/path/to/folder/folder2/folder1" ] +// +// directoryDiff("/", "/path/to/folder/folder/file") +// // => [ "/path", "/path/to", "/path/to/folder", "/path/to/folder/folder" ] +func directoryDiff(prev, current string) []string { + var paths []string + + if prev == "" || current == "" { + return paths + } + + parent := current + for { + parent = filepath.Dir(parent) + if parent == "/" || parent == prev || strings.HasPrefix(prev+"/", parent+"/") { + break + } + paths = append(paths, parent) + } + reverse(paths) + return paths +} + +func reverse(s []string) { + for i, j := 0, len(s)-1; i < j; i, j = i+1, j-1 { + s[i], s[j] = s[j], s[i] + } +} + +func (d *driver) s3Path(path string) string { + return strings.TrimLeft(strings.TrimRight(d.RootDirectory, "/")+path, "/") +} + +// S3BucketKey returns the s3 bucket key for the given storage driver path. 
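+// With RootDirectory "root", s3Path maps "/docker/registry/v2" to
+// "root/docker/registry/v2"; with RootDirectory "" or "/" the leading
+// slash is simply trimmed.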
+func (d *Driver) S3BucketKey(path string) string { + return d.StorageDriver.(*driver).s3Path(path) +} + +func parseError(path string, err error) error { + var s3Err awserr.Error + if ok := errors.As(err, &s3Err); ok && s3Err.Code() == "NoSuchKey" { + return storagedriver.PathNotFoundError{Path: path} + } + + return err +} + +func (d *driver) getEncryptionMode() *string { + if !d.Encrypt { + return nil + } + if d.KeyID == "" { + return aws.String("AES256") + } + return aws.String("aws:kms") +} + +func (d *driver) getSSEKMSKeyID() *string { + if d.KeyID != "" { + return aws.String(d.KeyID) + } + return nil +} + +func (d *driver) getContentType() *string { + return aws.String("application/octet-stream") +} + +func (d *driver) getACL() *string { + return aws.String(d.ObjectACL) +} + +func (d *driver) getStorageClass() *string { + if d.StorageClass == noStorageClass { + return nil + } + return aws.String(d.StorageClass) +} + +// buffer is a static size bytes buffer. +type buffer struct { + data []byte +} + +// NewBuffer returns a new bytes buffer from driver's memory pool. +// The size of the buffer is static and set to params.ChunkSize. +func (d *driver) NewBuffer() *buffer { + return d.pool.Get().(*buffer) +} + +// ReadFrom reads as much data as it can fit in from r without growing its size. +// It returns the number of bytes successfully read from r or error. +func (b *buffer) ReadFrom(r io.Reader) (offset int64, err error) { + for len(b.data) < cap(b.data) && err == nil { + var n int + n, err = r.Read(b.data[len(b.data):cap(b.data)]) + offset += int64(n) + b.data = b.data[:len(b.data)+n] + } + if err == io.EOF { + err = nil + } + return offset, err +} + +// Cap returns the capacity of the buffer's underlying byte slice. +func (b *buffer) Cap() int { + return cap(b.data) +} + +// Len returns the length of the data in the buffer. +func (b *buffer) Len() int { + return len(b.data) +} + +// Clear the buffer data. +func (b *buffer) Clear() { + b.data = b.data[:0] +} + +// writer attempts to upload parts to S3 in a buffered fashion where the last +// part is at least as large as the chunksize, so the multipart upload could be +// cleanly resumed in the future. This is violated if Close is called after less +// than a full chunk is written. 
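+// Incoming writes fill two pooled buffers: "ready" first, then "pending";
+// when pending is full both are flushed, so every part uploaded during
+// Write is a full ChunkSize bytes and only Close/Commit may flush a
+// short final part.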
+type writer struct { + ctx context.Context + driver *driver + key string + uploadID string + parts []*s3.Part + size int64 + ready *buffer + pending *buffer + closed bool + committed bool + cancelled bool +} + +func (d *driver) newWriter(ctx context.Context, key, uploadID string, parts []*s3.Part) storagedriver.FileWriter { + var size int64 + for _, part := range parts { + size += *part.Size + } + return &writer{ + ctx: ctx, + driver: d, + key: key, + uploadID: uploadID, + parts: parts, + size: size, + ready: d.NewBuffer(), + pending: d.NewBuffer(), + } +} + +type completedParts []*s3.CompletedPart + +func (a completedParts) Len() int { return len(a) } +func (a completedParts) Swap(i, j int) { a[i], a[j] = a[j], a[i] } +func (a completedParts) Less(i, j int) bool { return *a[i].PartNumber < *a[j].PartNumber } + +//nolint:gocognit +func (w *writer) Write(p []byte) (int, error) { + switch { + case w.closed: + return 0, fmt.Errorf("already closed") + case w.committed: + return 0, fmt.Errorf("already committed") + case w.cancelled: + return 0, fmt.Errorf("already cancelled") + } + + // If the last written part is smaller than minChunkSize, we need to make a + // new multipart upload :sadface: + if len(w.parts) > 0 && int(*w.parts[len(w.parts)-1].Size) < minChunkSize { + completedUploadedParts := make(completedParts, len(w.parts)) + for i, part := range w.parts { + completedUploadedParts[i] = &s3.CompletedPart{ + ETag: part.ETag, + PartNumber: part.PartNumber, + } + } + + sort.Sort(completedUploadedParts) + + log.Debug().Msgf("[AWS] Completing multipart upload for %s", w.key) + _, err := w.driver.S3.CompleteMultipartUploadWithContext( + w.ctx, &s3.CompleteMultipartUploadInput{ + Bucket: aws.String(w.driver.Bucket), + Key: aws.String(w.key), + UploadId: aws.String(w.uploadID), + MultipartUpload: &s3.CompletedMultipartUpload{ + Parts: completedUploadedParts, + }, + }, + ) + if err != nil { + log.Debug().Msgf("[AWS] Abort multipart upload for %s: %v", w.key, err) + if _, aErr := w.driver.S3.AbortMultipartUploadWithContext( + w.ctx, &s3.AbortMultipartUploadInput{ + Bucket: aws.String(w.driver.Bucket), + Key: aws.String(w.key), + UploadId: aws.String(w.uploadID), + }, + ); aErr != nil { + return 0, errors.Join(err, aErr) + } + return 0, err + } + + log.Debug().Msgf("[AWS] Creating new multipart upload for %s", w.key) + resp, err := w.driver.S3.CreateMultipartUploadWithContext( + w.ctx, &s3.CreateMultipartUploadInput{ + Bucket: aws.String(w.driver.Bucket), + Key: aws.String(w.key), + ContentType: w.driver.getContentType(), + ACL: w.driver.getACL(), + ServerSideEncryption: w.driver.getEncryptionMode(), + StorageClass: w.driver.getStorageClass(), + }, + ) + if err != nil { + return 0, err + } + w.uploadID = *resp.UploadId + + // If the entire written file is smaller than minChunkSize, we need to make + // a new part from scratch :double sad face: + if w.size < minChunkSize { + log.Debug().Msgf("[AWS] Uploading new part for %s", w.key) + resp, err := w.driver.S3.GetObjectWithContext( + w.ctx, &s3.GetObjectInput{ + Bucket: aws.String(w.driver.Bucket), + Key: aws.String(w.key), + }, + ) + if err != nil { + return 0, err + } + defer resp.Body.Close() + + // reset uploaded parts + w.parts = nil + w.ready.Clear() + + n, err := w.ready.ReadFrom(resp.Body) + if err != nil { + return 0, err + } + if resp.ContentLength != nil && n < *resp.ContentLength { + return 0, io.ErrShortBuffer + } + } else { + // Otherwise we can use the old file as the new first part + log.Debug().Msgf("[AWS] Upload copy part for %s", 
w.key) + copyPartResp, err := w.driver.S3.UploadPartCopyWithContext( + w.ctx, &s3.UploadPartCopyInput{ + Bucket: aws.String(w.driver.Bucket), + CopySource: aws.String(w.driver.Bucket + "/" + w.key), + Key: aws.String(w.key), + PartNumber: aws.Int64(1), + UploadId: resp.UploadId, + }, + ) + if err != nil { + return 0, err + } + w.parts = []*s3.Part{ + { + ETag: copyPartResp.CopyPartResult.ETag, + PartNumber: aws.Int64(1), + Size: aws.Int64(w.size), + }, + } + } + } + + var n int + + defer func() { w.size += int64(n) }() + + reader := bytes.NewReader(p) + + for reader.Len() > 0 { + // NOTE: we do some seemingly unsafe conversions + // from int64 to int in this for loop. These are fine as the + // offset returned from buffer.ReadFrom can only ever be + // maxChunkSize large which fits in to int. The reason why + // we return int64 is to play nice with Go interfaces where + // the buffer implements io.ReaderFrom interface. + + // fill up the ready parts buffer + offset, err := w.ready.ReadFrom(reader) + n += int(offset) + if err != nil { + return n, err + } + + // try filling up the pending parts buffer + offset, err = w.pending.ReadFrom(reader) + n += int(offset) + if err != nil { + return n, err + } + + // we filled up pending buffer, flush + if w.pending.Len() == w.pending.Cap() { + if err := w.flush(); err != nil { + return n, err + } + } + } + + return n, nil +} + +func (w *writer) Size() int64 { + return w.size +} +func (w *writer) Close() error { + if w.closed { + return fmt.Errorf("already closed") + } + w.closed = true + + defer func() { + w.ready.Clear() + w.driver.pool.Put(w.ready) + w.pending.Clear() + w.driver.pool.Put(w.pending) + }() + + return w.flush() +} + +func (w *writer) Cancel(ctx context.Context) error { + if w.closed { + return fmt.Errorf("already closed") + } else if w.committed { + return fmt.Errorf("already committed") + } + w.cancelled = true + log.Debug().Msgf("[AWS] Abort multipart upload for %s", w.key) + _, err := w.driver.S3.AbortMultipartUploadWithContext( + ctx, &s3.AbortMultipartUploadInput{ + Bucket: aws.String(w.driver.Bucket), + Key: aws.String(w.key), + UploadId: aws.String(w.uploadID), + }, + ) + return err +} + +func (w *writer) Commit(_ context.Context) error { + switch { + case w.closed: + return fmt.Errorf("already closed") + case w.committed: + return fmt.Errorf("already committed") + case w.cancelled: + return fmt.Errorf("already cancelled") + } + + err := w.flush() + if err != nil { + return err + } + + w.committed = true + + completedUploadedParts := make(completedParts, len(w.parts)) + for i, part := range w.parts { + completedUploadedParts[i] = &s3.CompletedPart{ + ETag: part.ETag, + PartNumber: part.PartNumber, + } + } + + // This is an edge case when we are trying to upload an empty file as part of + // the MultiPart upload. We get a PUT with Content-Length: 0 and sad things happen. + // The result is we are trying to Complete MultipartUpload with an empty list of + // completedUploadedParts which will always lead to 400 being returned from S3 + // See: https://docs.aws.amazon.com/sdk-for-go/api/service/s3/#CompletedMultipartUpload + // Solution: we upload the empty i.e. 0 byte part as a single part and then append it + // to the completedUploadedParts slice used to complete the Multipart upload. 
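+ // The zero-byte part below can safely use PartNumber 1 because this
+ // branch is only taken when no other part has been uploaded.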
+ if len(w.parts) == 0 { + log.Debug().Msgf("[AWS] Upload empty part for %s", w.key) + resp, err := w.driver.S3.UploadPartWithContext( + w.ctx, &s3.UploadPartInput{ + Bucket: aws.String(w.driver.Bucket), + Key: aws.String(w.key), + PartNumber: aws.Int64(1), + UploadId: aws.String(w.uploadID), + Body: bytes.NewReader(nil), + }, + ) + if err != nil { + return err + } + tmp := completedUploadedParts + + tmp = append( + tmp, &s3.CompletedPart{ + ETag: resp.ETag, + PartNumber: aws.Int64(1), + }, + ) + + completedUploadedParts = tmp + } + + sort.Sort(completedUploadedParts) + + log.Debug().Msgf("[AWS] Complete multipart upload for %s", w.key) + _, err = w.driver.S3.CompleteMultipartUploadWithContext( + w.ctx, &s3.CompleteMultipartUploadInput{ + Bucket: aws.String(w.driver.Bucket), + Key: aws.String(w.key), + UploadId: aws.String(w.uploadID), + MultipartUpload: &s3.CompletedMultipartUpload{ + Parts: completedUploadedParts, + }, + }, + ) + if err != nil { + log.Debug().Msgf("[AWS] Abort multipart upload for %s: %v", w.key, err) + if _, aErr := w.driver.S3.AbortMultipartUploadWithContext( + w.ctx, &s3.AbortMultipartUploadInput{ + Bucket: aws.String(w.driver.Bucket), + Key: aws.String(w.key), + UploadId: aws.String(w.uploadID), + }, + ); aErr != nil { + return errors.Join(err, aErr) + } + return err + } + return nil +} + +// flush flushes all buffers to write a part to S3. +// flush is only called by Write (with both buffers full) and Close/Commit (always). +func (w *writer) flush() error { + if w.ready.Len() == 0 && w.pending.Len() == 0 { + return nil + } + + start := time.Now() + buf := bytes.NewBuffer(w.ready.data) + partSize := buf.Len() + partNumber := aws.Int64(int64(len(w.parts) + 1)) + + log.Debug().Msgf("[AWS] Upload part %d for %s", *partNumber, w.key) + resp, err := w.driver.S3.UploadPartWithContext( + w.ctx, &s3.UploadPartInput{ + Bucket: aws.String(w.driver.Bucket), + Key: aws.String(w.key), + PartNumber: partNumber, + UploadId: aws.String(w.uploadID), + Body: bytes.NewReader(buf.Bytes()), + }, + ) + log.Debug().Msgf("Elapsed1: %d, %f, %s", *partNumber, time.Since(start).Seconds(), w.key) + if err != nil { + return err + } + + w.parts = append( + w.parts, &s3.Part{ + ETag: resp.ETag, + PartNumber: partNumber, + Size: aws.Int64(int64(partSize)), + }, + ) + log.Debug().Msgf("Elapsed2: %d, %f, %s", *partNumber, time.Since(start).Seconds(), w.key) + // reset the flushed buffer and swap buffers + w.ready.Clear() + w.ready, w.pending = w.pending, w.ready + + // In case we have more data in the pending buffer (now ready), we need to flush it + if w.ready.Len() > 0 { + start = time.Now() + err := w.flush() + log.Debug().Msgf("Elapsed Recursive: %d, %f, %s", *partNumber, time.Since(start).Seconds(), w.key) + return err + } + + log.Debug().Msgf("Elapsed exit: %d, %f, %s", *partNumber, time.Since(start).Seconds(), w.key) + return nil +} diff --git a/registry/app/driver/s3-aws/s3_v2_signer.go b/registry/app/driver/s3-aws/s3_v2_signer.go new file mode 100644 index 000000000..870c82187 --- /dev/null +++ b/registry/app/driver/s3-aws/s3_v2_signer.go @@ -0,0 +1,225 @@ +// Source: https://github.com/pivotal-golang/s3cli + +// Copyright (c) 2013 Damien Le Berrigaud and Nick Wade + +// Permission is hereby granted, free of charge, to any person obtaining a copy +// of this software and associated documentation files (the "Software"), to deal +// in the Software without restriction, including without limitation the rights +// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +// 
copies of the Software, and to permit persons to whom the Software is +// furnished to do so, subject to the following conditions: + +// The above copyright notice and this permission notice shall be included in +// all copies or substantial portions of the Software. + +// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN +// THE SOFTWARE. + +package s3 + +import ( + "crypto/hmac" + "crypto/sha256" + "encoding/base64" + "net/http" + "net/url" + "sort" + "strings" + "time" + + "github.com/aws/aws-sdk-go/aws/corehandlers" + "github.com/aws/aws-sdk-go/aws/credentials" + "github.com/aws/aws-sdk-go/aws/request" + "github.com/aws/aws-sdk-go/service/s3" + "github.com/rs/zerolog/log" +) + +type signer struct { + // Values that must be populated from the request. + Request *http.Request + Time time.Time + Credentials *credentials.Credentials + Query url.Values + stringToSign string + signature string +} + +var s3ParamsToSign = map[string]bool{ + "acl": true, + "location": true, + "logging": true, + "notification": true, + "partNumber": true, + "policy": true, + "requestPayment": true, + "torrent": true, + "uploadId": true, + "uploads": true, + "versionId": true, + "versioning": true, + "versions": true, + "response-content-type": true, + "response-content-language": true, + "response-expires": true, + "response-cache-control": true, + "response-content-disposition": true, + "response-content-encoding": true, + "website": true, + "delete": true, +} + +// setv2Handlers will setup v2 signature signing on the S3 driver. +func setv2Handlers(svc *s3.S3) { + svc.Handlers.Build.PushBack( + func(r *request.Request) { + parsedURL, err := url.Parse(r.HTTPRequest.URL.String()) + if err != nil { + log.Fatal().Msgf("Failed to parse URL: %v", err) + } + r.HTTPRequest.URL.Opaque = parsedURL.Path + }, + ) + + svc.Handlers.Sign.Clear() + svc.Handlers.Sign.PushBack(Sign) + svc.Handlers.Sign.PushBackNamed(corehandlers.BuildContentLengthHandler) +} + +// Sign requests with signature version 2. +// +// Will sign the requests with the service config's Credentials object +// Signing is skipped if the credentials is the credentials.AnonymousCredentials +// object. +func Sign(req *request.Request) { + // If the request does not need to be signed ignore the signing of the + // request if the AnonymousCredentials object is used. + if req.Config.Credentials == credentials.AnonymousCredentials { + return + } + + v2 := signer{ + Request: req.HTTPRequest, + Time: req.Time, + Credentials: req.Config.Credentials, + } + + // nolint:errcheck. 
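+ // V2 signing canonicalizes the request into a single string,
+ //
+ //	Method \n Content-MD5 \n Content-Type \n Date \n
+ //	CanonicalizedAmzHeaders CanonicalizedResource
+ //
+ // which Sign() below HMACs with the secret key (using SHA-256 here
+ // rather than the HMAC-SHA1 of the classic v2 scheme).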
+ err := v2.Sign() + if err != nil { + log.Fatal().Msgf("Error in signing s3: %v", err) + } +} + +func (v2 *signer) Sign() error { + credValue, err := v2.Credentials.Get() + if err != nil { + return err + } + accessKey := credValue.AccessKeyID + var ( + md5, ctype, date, xamz string + xamzDate bool + sarray []string + smap map[string]string + sharray []string + ) + + headers := v2.Request.Header + params := v2.Request.URL.Query() + parsedURL, err := url.Parse(v2.Request.URL.String()) + if err != nil { + return err + } + host, canonicalPath := parsedURL.Host, parsedURL.Path + v2.Request.Header["Host"] = []string{host} + v2.Request.Header["date"] = []string{v2.Time.In(time.UTC).Format(time.RFC1123)} + if credValue.SessionToken != "" { + v2.Request.Header["x-amz-security-token"] = []string{credValue.SessionToken} + } + + smap = make(map[string]string) + for k, v := range headers { + k = strings.ToLower(k) + switch k { + case "content-md5": + md5 = v[0] + case "content-type": + ctype = v[0] + case "date": + if !xamzDate { + date = v[0] + } + default: + if strings.HasPrefix(k, "x-amz-") { + vall := strings.Join(v, ",") + smap[k] = k + ":" + vall + if k == "x-amz-date" { + xamzDate = true + date = "" + } + sharray = append(sharray, k) + } + } + } + if len(sharray) > 0 { + sort.StringSlice(sharray).Sort() + for _, h := range sharray { + sarray = append(sarray, smap[h]) + } + xamz = strings.Join(sarray, "\n") + "\n" + } + + expires := false + if v, ok := params["Expires"]; ok { + expires = true + date = v[0] + params["AWSAccessKeyId"] = []string{accessKey} + } + + sarray = sarray[0:0] + for k, v := range params { + if s3ParamsToSign[k] { + for _, vi := range v { + if vi == "" { + sarray = append(sarray, k) + } else { + sarray = append(sarray, k+"="+vi) + } + } + } + } + if len(sarray) > 0 { + sort.StringSlice(sarray).Sort() + canonicalPath = canonicalPath + "?" + strings.Join(sarray, "&") + } + + v2.stringToSign = strings.Join( + []string{ + v2.Request.Method, + md5, + ctype, + date, + xamz + canonicalPath, + }, "\n", + ) + hash := hmac.New(sha256.New, []byte(credValue.SecretAccessKey)) + hash.Write([]byte(v2.stringToSign)) + v2.signature = base64.StdEncoding.EncodeToString(hash.Sum(nil)) + + if expires { + params["Signature"] = []string{v2.signature} + } else { + headers["Authorization"] = []string{"AWS " + accessKey + ":" + v2.signature} + } + + log.Debug(). + Interface("string-to-sign", v2.stringToSign). + Interface("signature", v2.signature). + Msg("request signature") + return nil +} diff --git a/registry/app/driver/storagedriver.go b/registry/app/driver/storagedriver.go new file mode 100644 index 000000000..e7c70248b --- /dev/null +++ b/registry/app/driver/storagedriver.go @@ -0,0 +1,271 @@ +// Source: https://github.com/distribution/distribution + +// Copyright 2014 https://github.com/distribution/distribution Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+
+package driver
+
+import (
+ "context"
+ "encoding/json"
+ "fmt"
+ "io"
+ "regexp"
+ "strconv"
+ "strings"
+)
+
+// Version is a string representing the storage driver version, of the form
+// Major.Minor.
+// The registry must accept storage drivers with equal major version and greater
+// minor version, but may not be compatible with older storage driver versions.
+type Version string
+
+// Major returns the major (primary) component of a version.
+func (version Version) Major() uint {
+ majorPart, _, _ := strings.Cut(string(version), ".")
+ major, _ := strconv.ParseUint(majorPart, 10, 0)
+ return uint(major)
+}
+
+// Minor returns the minor (secondary) component of a version.
+func (version Version) Minor() uint {
+ _, minorPart, _ := strings.Cut(string(version), ".")
+ minor, _ := strconv.ParseUint(minorPart, 10, 0)
+ return uint(minor)
+}
+
+// CurrentVersion is the current storage driver Version.
+const CurrentVersion Version = "0.1"
+
+// WalkOptions provides options to the walk function that may adjust its behaviour.
+type WalkOptions struct {
+ // If StartAfterHint is set, the walk may start with the first item lexicographically
+ // after the hint, but it is not guaranteed and drivers may start the walk from the path.
+ StartAfterHint string
+}
+
+func WithStartAfterHint(startAfterHint string) func(*WalkOptions) {
+ return func(s *WalkOptions) {
+ s.StartAfterHint = startAfterHint
+ }
+}
+
+// StorageDriver defines methods that a Storage Driver must implement for a
+// filesystem-like key/value object storage. Storage Drivers are automatically
+// registered via an internal registration mechanism, and generally created
+// via the StorageDriverFactory interface
+// (https://godoc.org/github.com/distribution/distribution/registry/storage/driver/factory).
+// Please see the aforementioned factory package for example code showing how to get an instance
+// of a StorageDriver.
+type StorageDriver interface {
+ StorageDeleter
+
+ // Name returns the human-readable "name" of the driver, useful in error
+ // messages and logging. By convention, this will just be the registration
+ // name, but drivers may provide other information here.
+ Name() string
+
+ // GetContent retrieves the content stored at "path" as a []byte.
+ // This should primarily be used for small objects.
+ GetContent(ctx context.Context, path string) ([]byte, error)
+
+ // PutContent stores the []byte content at a location designated by "path".
+ // This should primarily be used for small objects.
+ PutContent(ctx context.Context, path string, content []byte) error
+
+ // Reader retrieves an io.ReadCloser for the content stored at "path"
+ // with a given byte offset.
+ // May be used to resume reading a stream by providing a nonzero offset.
+ Reader(ctx context.Context, path string, offset int64) (io.ReadCloser, error)
+
+ // Writer returns a FileWriter which will store the content written to it
+ // at the location designated by "path" after the call to Commit.
+ // A path may be appended to if it has not been committed, or if the
+ // existing committed content is zero length.
+ //
+ // The behaviour of appending to paths with non-empty committed content is
+ // undefined. Specific implementations may document their own behavior.
+ Writer(ctx context.Context, path string, appendMode bool) (FileWriter, error)
+
+ // Stat retrieves the FileInfo for the given path, including the current
+ // size in bytes and the creation time.
+ Stat(ctx context.Context, path string) (FileInfo, error) + + // List returns a list of the objects that are direct descendants of the + // given path. + List(ctx context.Context, path string) ([]string, error) + + // Move moves an object stored at sourcePath to destPath, removing the + // original object. + Move(ctx context.Context, sourcePath string, destPath string) error + + // RedirectURL returns a URL which the client of the request r may use + // to retrieve the content stored at path. Returning the empty string + // signals that the request may not be redirected. + RedirectURL(ctx context.Context, method string, path string) (string, error) + + // Walk traverses a filesystem defined within driver, starting + // from the given path, calling f on each file. + // If the returned error from the WalkFn is ErrSkipDir and fileInfo refers + // to a directory, the directory will not be entered and Walk + // will continue the traversal. + // If the returned error from the WalkFn is ErrFilledBuffer, processing stops. + Walk(ctx context.Context, path string, f WalkFn, options ...func(*WalkOptions)) error +} + +// StorageDeleter defines methods that a Storage Driver must implement to delete objects. +// This allows using a narrower interface than StorageDriver when we only need the delete functionality, such as when +// mocking a storage driver for testing online garbage collection. +type StorageDeleter interface { + // Delete recursively deletes all objects stored at "path" and its subpaths. + Delete(ctx context.Context, path string) error +} + +// FileWriter provides an abstraction for an opened writable file-like object in +// the storage backend. The FileWriter must flush all content written to it on +// the call to Close, but is only required to make its content readable on a +// call to Commit. +type FileWriter interface { + io.WriteCloser + + // Size returns the number of bytes written to this FileWriter. + Size() int64 + + // Cancel removes any written content from this FileWriter. + Cancel(context.Context) error + + // Commit flushes all content written to this FileWriter and makes it + // available for future calls to StorageDriver.GetContent and + // StorageDriver.Reader. + Commit(context.Context) error +} + +// PathRegexp is the regular expression which each file path must match. A +// file path is absolute, beginning with a slash and containing a positive +// number of path components separated by slashes, where each component is +// restricted to alphanumeric characters or a period, underscore, or +// hyphen. +var PathRegexp = regexp.MustCompile(`^(/[A-Za-z0-9._-]+)+$`) + +// UnsupportedMethodError may be returned in the case where a +// StorageDriver implementation does not support an optional method. +type UnsupportedMethodError struct { + DriverName string +} + +func (err UnsupportedMethodError) Error() string { + return fmt.Sprintf("%s: unsupported method", err.DriverName) +} + +// PathNotFoundError is returned when operating on a nonexistent path. +type PathNotFoundError struct { + Path string + DriverName string +} + +func (err PathNotFoundError) Error() string { + return fmt.Sprintf("%s: Path not found: %s", err.DriverName, err.Path) +} + +// InvalidPathError is returned when the provided path is malformed. 
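+// A path is malformed when it does not match PathRegexp above, e.g.
+// "abc" (no leading slash) or "/abc_123/" (trailing slash).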
+type InvalidPathError struct { + Path string + DriverName string +} + +func (err InvalidPathError) Error() string { + return fmt.Sprintf("%s: invalid path: %s", err.DriverName, err.Path) +} + +// InvalidOffsetError is returned when attempting to read or write from an +// invalid offset. +type InvalidOffsetError struct { + Path string + Offset int64 + DriverName string +} + +func (err InvalidOffsetError) Error() string { + return fmt.Sprintf("%s: invalid offset: %d for path: %s", err.DriverName, err.Offset, err.Path) +} + +// Error is a catch-all error type which captures an error string and +// the driver type on which it occurred. +type Error struct { + DriverName string + Detail error +} + +func (err Error) Error() string { + return fmt.Sprintf("%s: %s", err.DriverName, err.Detail) +} + +func (err Error) MarshalJSON() ([]byte, error) { + return json.Marshal( + struct { + DriverName string `json:"driver"` + Detail string `json:"detail"` + }{ + DriverName: err.DriverName, + Detail: err.Detail.Error(), + }, + ) +} + +// StorageDriverError provides the envelope for multiple errors +// for use within the storagedriver implementations. +type StorageDriverError struct { + DriverName string + Errs []error +} + +var _ error = StorageDriverError{} + +func (e StorageDriverError) Error() string { + switch len(e.Errs) { + case 0: + return fmt.Sprintf("%s: ", e.DriverName) + case 1: + return fmt.Sprintf("%s: %s", e.DriverName, e.Errs[0].Error()) + default: + msg := "errors:\n" + for _, err := range e.Errs { + msg += err.Error() + "\n" + } + return fmt.Sprintf("%s: %s", e.DriverName, msg) + } +} + +// MarshalJSON converts slice of errors into the format +// that is serializable by JSON. +func (e StorageDriverError) MarshalJSON() ([]byte, error) { + tmpErrs := struct { + DriverName string `json:"driver"` + Details []string `json:"details"` + }{ + DriverName: e.DriverName, + } + + if len(e.Errs) == 0 { + tmpErrs.Details = make([]string, 0) + return json.Marshal(tmpErrs) + } + + for _, err := range e.Errs { + tmpErrs.Details = append(tmpErrs.Details, err.Error()) + } + + return json.Marshal(tmpErrs) +} diff --git a/registry/app/driver/testsuites/testsuites.go b/registry/app/driver/testsuites/testsuites.go new file mode 100644 index 000000000..3edf2a643 --- /dev/null +++ b/registry/app/driver/testsuites/testsuites.go @@ -0,0 +1,1429 @@ +// Source: https://github.com/distribution/distribution + +// Copyright 2014 https://github.com/distribution/distribution Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package testsuites + +import ( + "bytes" + "context" + crand "crypto/rand" + "crypto/sha256" + "errors" + "io" + "math/big" + "net/http" + "os" + "path" + "sort" + "sync" + "testing" + "time" + + storagedriver "github.com/harness/gitness/registry/app/driver" + + "github.com/rs/zerolog/log" + "github.com/stretchr/testify/suite" +) + +// randomBytes pre-allocates all of the memory sizes needed for the test. If +// anything panics while accessing randomBytes, just make this number bigger. 
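+// (128<<20 bytes = 128 MiB.)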
+var randomBytes = make([]byte, 128<<20) + +const suffix = "suffix" + +func init() { + _, _ = crand.Read(randomBytes) // always returns len(randomBytes) and nil error +} + +// DriverConstructor is a function which returns a new +// storagedriver.StorageDriver. +type DriverConstructor func() (storagedriver.StorageDriver, error) + +// DriverTeardown is a function which cleans up a suite's +// storagedriver.StorageDriver. +type DriverTeardown func() error + +// DriverSuite is a [suite.Suite] test suite designed to test a +// storagedriver.StorageDriver. +type DriverSuite struct { + suite.Suite + Constructor DriverConstructor + Teardown DriverTeardown + storagedriver.StorageDriver + ctx context.Context +} + +// Driver runs [DriverSuite] for the given [DriverConstructor]. +func Driver(t *testing.T, driverConstructor DriverConstructor) { + suite.Run( + t, &DriverSuite{ + Constructor: driverConstructor, + ctx: context.Background(), + }, + ) +} + +// SetupSuite implements [suite.SetupAllSuite] interface. +func (suite *DriverSuite) SetupSuite() { + d, err := suite.Constructor() + suite.Require().NoError(err) + suite.StorageDriver = d +} + +// TearDownSuite implements [suite.TearDownAllSuite]. +func (suite *DriverSuite) TearDownSuite() { + if suite.Teardown != nil { + suite.Require().NoError(suite.Teardown()) + } +} + +// TearDownTest implements [suite.TearDownTestSuite]. +// This causes the suite to abort if any files are left around in the storage +// driver. +func (suite *DriverSuite) TearDownTest() { + files, _ := suite.StorageDriver.List(suite.ctx, "/") + if len(files) > 0 { + suite.T().Fatalf("Storage driver did not clean up properly. Offending files: %#v", files) + } +} + +// TestRootExists ensures that all storage drivers have a root path by default. +func (suite *DriverSuite) TestRootExists() { + _, err := suite.StorageDriver.List(suite.ctx, "/") + if err != nil { + suite.T().Fatalf(`the root path "/" should always exist: %v`, err) + } +} + +// TestValidPaths checks that various valid file paths are accepted by the +// storage driver. +func (suite *DriverSuite) TestValidPaths() { + contents := randomContents(64) + validFiles := []string{ + "/a", + "/2", + "/aa", + "/a.a", + "/0-9/abcdefg", + "/abcdefg/z.75", + "/abc/1.2.3.4.5-6_zyx/123.z/4", + "/docker/docker-registry", + "/123.abc", + "/abc./abc", + "/.abc", + "/a--b", + "/a-.b", + "/_.abc", + "/Docker/docker-registry", + "/Abc/Cba", + } + + for _, filename := range validFiles { + err := suite.StorageDriver.PutContent(suite.ctx, filename, contents) + defer suite.deletePath(firstPart(filename)) + suite.Require().NoError(err) + + received, err := suite.StorageDriver.GetContent(suite.ctx, filename) + suite.Require().NoError(err) + suite.Require().Equal(contents, received) + } +} + +func (suite *DriverSuite) deletePath(path string) { + for tries := 2; tries > 0; tries-- { + err := suite.StorageDriver.Delete(suite.ctx, path) + if ok := errors.As(err, &storagedriver.PathNotFoundError{}); ok { + err = nil + } + suite.Require().NoError(err) + paths, _ := suite.StorageDriver.List(suite.ctx, path) + if len(paths) == 0 { + break + } + time.Sleep(time.Second * 2) + } +} + +// TestInvalidPaths checks that various invalid file paths are rejected by the +// storage driver. 
+func (suite *DriverSuite) TestInvalidPaths() { + contents := randomContents(64) + invalidFiles := []string{ + "", + "/", + "abc", + "123.abc", + "//bcd", + "/abc_123/", + } + + for _, filename := range invalidFiles { + err := suite.StorageDriver.PutContent(suite.ctx, filename, contents) + // only delete if file was successfully written + if err == nil { + defer suite.deletePath(firstPart(filename)) + } + suite.Require().Error(err) + suite.Require().IsType(err, storagedriver.InvalidPathError{}) + suite.Require().Contains(err.Error(), suite.Name()) + + _, err = suite.StorageDriver.GetContent(suite.ctx, filename) + suite.Require().Error(err) + suite.Require().IsType(err, storagedriver.InvalidPathError{}) + suite.Require().Contains(err.Error(), suite.Name()) + } +} + +// TestWriteRead1 tests a simple write-read workflow. +func (suite *DriverSuite) TestWriteRead1() { + filename := randomPath(32) + contents := []byte("a") + suite.writeReadCompare(filename, contents) +} + +// TestWriteRead2 tests a simple write-read workflow with unicode data. +func (suite *DriverSuite) TestWriteRead2() { + filename := randomPath(32) + contents := []byte("\xc3\x9f") + suite.writeReadCompare(filename, contents) +} + +// TestWriteRead3 tests a simple write-read workflow with a small string. +func (suite *DriverSuite) TestWriteRead3() { + filename := randomPath(32) + contents := randomContents(32) + suite.writeReadCompare(filename, contents) +} + +// TestWriteRead4 tests a simple write-read workflow with 1MB of data. +func (suite *DriverSuite) TestWriteRead4() { + filename := randomPath(32) + contents := randomContents(1024 * 1024) + suite.writeReadCompare(filename, contents) +} + +// TestWriteReadNonUTF8 tests that non-utf8 data may be written to the storage +// driver safely. +func (suite *DriverSuite) TestWriteReadNonUTF8() { + filename := randomPath(32) + contents := []byte{0x80, 0x80, 0x80, 0x80} + suite.writeReadCompare(filename, contents) +} + +// TestTruncate tests that putting smaller contents than an original file does +// remove the excess contents. +func (suite *DriverSuite) TestTruncate() { + filename := randomPath(32) + contents := randomContents(1024 * 1024) + suite.writeReadCompare(filename, contents) + + contents = randomContents(1024) + suite.writeReadCompare(filename, contents) +} + +// TestReadNonexistent tests reading content from an empty path. +func (suite *DriverSuite) TestReadNonexistent() { + filename := randomPath(32) + _, err := suite.StorageDriver.GetContent(suite.ctx, filename) + suite.Require().Error(err) + suite.Require().IsType(err, storagedriver.PathNotFoundError{}) + suite.Require().Contains(err.Error(), suite.Name()) +} + +// TestWriteReadStreams1 tests a simple write-read streaming workflow. +func (suite *DriverSuite) TestWriteReadStreams1() { + filename := randomPath(32) + contents := []byte("a") + suite.writeReadCompareStreams(filename, contents) +} + +// TestWriteReadStreams2 tests a simple write-read streaming workflow with +// unicode data. +func (suite *DriverSuite) TestWriteReadStreams2() { + filename := randomPath(32) + contents := []byte("\xc3\x9f") + suite.writeReadCompareStreams(filename, contents) +} + +// TestWriteReadStreams3 tests a simple write-read streaming workflow with a +// small amount of data. +func (suite *DriverSuite) TestWriteReadStreams3() { + filename := randomPath(32) + contents := randomContents(32) + suite.writeReadCompareStreams(filename, contents) +} + +// TestWriteReadStreams4 tests a simple write-read streaming workflow with 1MB +// of data. 
+func (suite *DriverSuite) TestWriteReadStreams4() { + filename := randomPath(32) + contents := randomContents(1024 * 1024) + suite.writeReadCompareStreams(filename, contents) +} + +// TestWriteReadStreamsNonUTF8 tests that non-utf8 data may be written to the +// storage driver safely. +func (suite *DriverSuite) TestWriteReadStreamsNonUTF8() { + filename := randomPath(32) + contents := []byte{0x80, 0x80, 0x80, 0x80} + suite.writeReadCompareStreams(filename, contents) +} + +// TestWriteReadLargeStreams tests that a 5GB file may be written to the storage +// driver safely. +func (suite *DriverSuite) TestWriteReadLargeStreams() { + if testing.Short() { + suite.T().Skip("Skipping test in short mode") + } + + filename := randomPath(32) + defer suite.deletePath(firstPart(filename)) + + checksum := sha256.New() + var fileSize int64 = 5 * 1024 * 1024 * 1024 + + contents := newRandReader(fileSize) + + writer, err := suite.StorageDriver.Writer(suite.ctx, filename, false) + suite.Require().NoError(err) + written, err := io.Copy(writer, io.TeeReader(contents, checksum)) + suite.Require().NoError(err) + suite.Require().Equal(fileSize, written) + + err = writer.Commit(context.Background()) + suite.Require().NoError(err) + err = writer.Close() + suite.Require().NoError(err) + + reader, err := suite.StorageDriver.Reader(suite.ctx, filename, 0) + suite.Require().NoError(err) + defer reader.Close() + + writtenChecksum := sha256.New() + if _, err := io.Copy(writtenChecksum, reader); err != nil { + suite.Require().NoError(err) + } + + suite.Require().Equal(checksum.Sum(nil), writtenChecksum.Sum(nil)) +} + +// TestReaderWithOffset tests that the appropriate data is streamed when +// reading with a given offset. +func (suite *DriverSuite) TestReaderWithOffset() { + filename := randomPath(32) + defer suite.deletePath(firstPart(filename)) + + chunkSize := int64(32) + + contentsChunk1 := randomContents(chunkSize) + contentsChunk2 := randomContents(chunkSize) + contentsChunk3 := randomContents(chunkSize) + + err := suite.StorageDriver.PutContent( + suite.ctx, + filename, + append(append(contentsChunk1, contentsChunk2...), contentsChunk3...), + ) + suite.Require().NoError(err) + + reader, err := suite.StorageDriver.Reader(suite.ctx, filename, 0) + suite.Require().NoError(err) + defer reader.Close() + + readContents, err := io.ReadAll(reader) + suite.Require().NoError(err) + + suite.Require().Equal(append(append(contentsChunk1, contentsChunk2...), contentsChunk3...), readContents) + + reader, err = suite.StorageDriver.Reader(suite.ctx, filename, chunkSize) + suite.Require().NoError(err) + defer reader.Close() + + readContents, err = io.ReadAll(reader) + suite.Require().NoError(err) + + suite.Require().Equal(append(contentsChunk2, contentsChunk3...), readContents) + + reader, err = suite.StorageDriver.Reader(suite.ctx, filename, chunkSize*2) + suite.Require().NoError(err) + defer reader.Close() + + readContents, err = io.ReadAll(reader) + suite.Require().NoError(err) + suite.Require().Equal(contentsChunk3, readContents) + + // Ensure we get invalid offset for negative offsets. 
+ reader, err = suite.StorageDriver.Reader(suite.ctx, filename, -1) + var invalidOffsetErr storagedriver.InvalidOffsetError + errors.As(err, &invalidOffsetErr) + suite.Require().IsType(err, storagedriver.InvalidOffsetError{}) + suite.Require().Equal(int64(-1), invalidOffsetErr.Offset) + suite.Require().Equal(filename, invalidOffsetErr.Path) + suite.Require().Nil(reader) + suite.Require().Contains(err.Error(), suite.Name()) + + // Read past the end of the content and make sure we get a reader that + // returns 0 bytes and io.EOF + reader, err = suite.StorageDriver.Reader(suite.ctx, filename, chunkSize*3) + suite.Require().NoError(err) + defer reader.Close() + + buf := make([]byte, chunkSize) + n, err := reader.Read(buf) + suite.Require().ErrorIs(err, io.EOF) + suite.Require().Equal(0, n) + + // Check the N-1 boundary condition, ensuring we get 1 byte then io.EOF. + reader, err = suite.StorageDriver.Reader(suite.ctx, filename, chunkSize*3-1) + suite.Require().NoError(err) + defer reader.Close() + + n, err = reader.Read(buf) + suite.Require().Equal(1, n) + + // We don't care whether the io.EOF comes on the this read or the first + // zero read, but the only error acceptable here is io.EOF. + if err != nil { + suite.Require().ErrorIs(err, io.EOF) + } + + // Any more reads should result in zero bytes and io.EOF + n, err = reader.Read(buf) + suite.Require().Equal(0, n) + suite.Require().ErrorIs(err, io.EOF) +} + +// TestContinueStreamAppendLarge tests that a stream write can be appended to without +// corrupting the data with a large chunk size. +func (suite *DriverSuite) TestContinueStreamAppendLarge() { + chunkSize := int64(10 * 1024 * 1024) + if suite.Name() == "azure" { + chunkSize = int64(4 * 1024 * 1024) + } + suite.testContinueStreamAppend(chunkSize) +} + +// TestContinueStreamAppendSmall is the same as TestContinueStreamAppendLarge, but only +// with a tiny chunk size in order to test corner cases for some cloud storage drivers. +func (suite *DriverSuite) TestContinueStreamAppendSmall() { + suite.testContinueStreamAppend(int64(32)) +} + +func (suite *DriverSuite) testContinueStreamAppend(chunkSize int64) { + filename := randomPath(32) + defer suite.deletePath(firstPart(filename)) + + contentsChunk1 := randomContents(chunkSize) + contentsChunk2 := randomContents(chunkSize) + contentsChunk3 := randomContents(chunkSize) + + fullContents := append(append(contentsChunk1, contentsChunk2...), contentsChunk3...) 
+ + writer, err := suite.StorageDriver.Writer(suite.ctx, filename, false) + suite.Require().NoError(err) + nn, err := io.Copy(writer, bytes.NewReader(contentsChunk1)) + suite.Require().NoError(err) + suite.Require().Equal(int64(len(contentsChunk1)), nn) + + err = writer.Close() + suite.Require().NoError(err) + + curSize := writer.Size() + suite.Require().Equal(int64(len(contentsChunk1)), curSize) + + writer, err = suite.StorageDriver.Writer(suite.ctx, filename, true) + suite.Require().NoError(err) + suite.Require().Equal(curSize, writer.Size()) + + nn, err = io.Copy(writer, bytes.NewReader(contentsChunk2)) + suite.Require().NoError(err) + suite.Require().Equal(int64(len(contentsChunk2)), nn) + + err = writer.Close() + suite.Require().NoError(err) + + curSize = writer.Size() + suite.Require().Equal(2*chunkSize, curSize) + + writer, err = suite.StorageDriver.Writer(suite.ctx, filename, true) + suite.Require().NoError(err) + suite.Require().Equal(curSize, writer.Size()) + + nn, err = io.Copy(writer, bytes.NewReader(fullContents[curSize:])) + suite.Require().NoError(err) + suite.Require().Equal(int64(len(fullContents[curSize:])), nn) + + err = writer.Commit(context.Background()) + suite.Require().NoError(err) + err = writer.Close() + suite.Require().NoError(err) + + received, err := suite.StorageDriver.GetContent(suite.ctx, filename) + suite.Require().NoError(err) + suite.Require().Equal(fullContents, received) +} + +// TestReadNonexistentStream tests that reading a stream for a nonexistent path +// fails. +func (suite *DriverSuite) TestReadNonexistentStream() { + filename := randomPath(32) + + _, err := suite.StorageDriver.Reader(suite.ctx, filename, 0) + suite.Require().Error(err) + suite.Require().IsType(err, storagedriver.PathNotFoundError{}) + suite.Require().Contains(err.Error(), suite.Name()) + + _, err = suite.StorageDriver.Reader(suite.ctx, filename, 64) + suite.Require().Error(err) + suite.Require().IsType(err, storagedriver.PathNotFoundError{}) + suite.Require().Contains(err.Error(), suite.Name()) +} + +// TestWriteZeroByteStreamThenAppend tests if zero byte file handling works for append to a Stream. 
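+// A committed zero-byte stream must still materialize the path, so that a
+// later append can open it; in sketch form:
+//
+//	w, _ := driver.Writer(ctx, path, false)
+//	w.Commit(ctx)
+//	w.Close() // path now exists with Size() == 0
+//	w, _ = driver.Writer(ctx, path, true) // append must succeed from offset 0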
+func (suite *DriverSuite) TestWriteZeroByteStreamThenAppend() { + filename := randomPath(32) + defer suite.deletePath(firstPart(filename)) + chunkSize := int64(32) + contentsChunk1 := randomContents(chunkSize) + + // Open a Writer + writer, err := suite.StorageDriver.Writer(suite.ctx, filename, false) + suite.Require().NoError(err) + + // Close the Writer + err = writer.Commit(context.Background()) + suite.Require().NoError(err) + err = writer.Close() + suite.Require().NoError(err) + curSize := writer.Size() + suite.Require().Equal(int64(0), curSize) + + // Open a Reader + reader, err := suite.StorageDriver.Reader(suite.ctx, filename, 0) + suite.Require().NoError(err) + defer reader.Close() + + // Check the file is empty + buf := make([]byte, chunkSize) + n, err := reader.Read(buf) + suite.Require().ErrorIs(err, io.EOF) + suite.Require().Equal(0, n) + + // Open a Writer for Append + awriter, err := suite.StorageDriver.Writer(suite.ctx, filename, true) + suite.Require().NoError(err) + + // Write small bytes to AppendWriter + nn, err := io.Copy(awriter, bytes.NewReader(contentsChunk1)) + suite.Require().NoError(err) + suite.Require().Equal(int64(len(contentsChunk1)), nn) + + // Close the AppendWriter + err = awriter.Commit(context.Background()) + suite.Require().NoError(err) + err = awriter.Close() + suite.Require().NoError(err) + appendSize := awriter.Size() + suite.Require().Equal(int64(len(contentsChunk1)), appendSize) + + // Open a Reader + reader, err = suite.StorageDriver.Reader(suite.ctx, filename, 0) + suite.Require().NoError(err) + defer reader.Close() + + // Read small bytes from Reader + readContents, err := io.ReadAll(reader) + suite.Require().NoError(err) + suite.Require().Equal(contentsChunk1, readContents) +} + +// TestWriteZeroByteContentThenAppend tests if zero byte file handling works for append to PutContent. +func (suite *DriverSuite) TestWriteZeroByteContentThenAppend() { + filename := randomPath(32) + defer suite.deletePath(firstPart(filename)) + chunkSize := int64(32) + contentsChunk1 := randomContents(chunkSize) + + err := suite.StorageDriver.PutContent(suite.ctx, filename, nil) + suite.Require().NoError(err) + + // Open a Reader + reader, err := suite.StorageDriver.Reader(suite.ctx, filename, 0) + suite.Require().NoError(err) + defer reader.Close() + + // Check the file is empty + buf := make([]byte, chunkSize) + n, err := reader.Read(buf) + suite.Require().ErrorIs(err, io.EOF) + suite.Require().Equal(0, n) + + // Open a Writer for Append + awriter, err := suite.StorageDriver.Writer(suite.ctx, filename, true) + suite.Require().NoError(err) + + // Write small bytes to AppendWriter + nn, err := io.Copy(awriter, bytes.NewReader(contentsChunk1)) + suite.Require().NoError(err) + suite.Require().Equal(int64(len(contentsChunk1)), nn) + + // Close the AppendWriter + err = awriter.Commit(context.Background()) + suite.Require().NoError(err) + err = awriter.Close() + suite.Require().NoError(err) + appendSize := awriter.Size() + suite.Require().Equal(int64(len(contentsChunk1)), appendSize) + + // Open a Reader + reader, err = suite.StorageDriver.Reader(suite.ctx, filename, 0) + suite.Require().NoError(err) + defer reader.Close() + + // Read small bytes from Reader + readContents, err := io.ReadAll(reader) + suite.Require().NoError(err) + suite.Require().Equal(contentsChunk1, readContents) +} + +// TestList checks the returned list of keys after populating a directory tree. 
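+// For a tree built as /root/parent/{children}, the expectations exercised
+// below are, in sketch form:
+//
+//	List("/")            -> ["/root"]
+//	List("/root")        -> ["/root/parent"]
+//	List("/root/parent") -> the child files (compared after sorting)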
+func (suite *DriverSuite) TestList() { + c, e := crand.Int(crand.Reader, big.NewInt(int64(100))) + if e != nil { + log.Warn().Msgf("Error in securing random no: %s", e) + } + rootDirectory := "/" + randomFilename(c.Int64()) + defer suite.deletePath(rootDirectory) + + doesnotexist := path.Join(rootDirectory, "nonexistent") + _, err := suite.StorageDriver.List(suite.ctx, doesnotexist) + suite.Require().Equal( + err, storagedriver.PathNotFoundError{ + Path: doesnotexist, + DriverName: suite.StorageDriver.Name(), + }, + ) + + c1, e1 := crand.Int(crand.Reader, big.NewInt(int64(100))) + if e1 != nil { + log.Warn().Msgf("Error in securing random no: %s", e1) + } + parentDirectory := rootDirectory + "/" + randomFilename(c1.Int64()) + childFiles := make([]string, 50) + for i := 0; i < len(childFiles); i++ { + c2, e2 := crand.Int(crand.Reader, big.NewInt(int64(100))) + if e2 != nil { + log.Warn().Msgf("Error in securing random no: %s", e2) + } + childFile := parentDirectory + "/" + randomFilename(c2.Int64()) + childFiles[i] = childFile + err := suite.StorageDriver.PutContent(suite.ctx, childFile, randomContents(32)) + suite.Require().NoError(err) + } + sort.Strings(childFiles) + + keys, err := suite.StorageDriver.List(suite.ctx, "/") + suite.Require().NoError(err) + suite.Require().Equal([]string{rootDirectory}, keys) + + keys, err = suite.StorageDriver.List(suite.ctx, rootDirectory) + suite.Require().NoError(err) + suite.Require().Equal([]string{parentDirectory}, keys) + + keys, err = suite.StorageDriver.List(suite.ctx, parentDirectory) + suite.Require().NoError(err) + + sort.Strings(keys) + suite.Require().Equal(childFiles, keys) + + // A few checks to add here (check out #819 for more discussion on this): + // 1. Ensure that all paths are absolute. + // 2. Ensure that listings only include direct children. + // 3. Ensure that we only respond to directory listings that end with a slash (maybe?). +} + +// TestMove checks that a moved object no longer exists at the source path and +// does exist at the destination. +func (suite *DriverSuite) TestMove() { + contents := randomContents(32) + sourcePath := randomPath(32) + destPath := randomPath(32) + + defer suite.deletePath(firstPart(sourcePath)) + defer suite.deletePath(firstPart(destPath)) + + err := suite.StorageDriver.PutContent(suite.ctx, sourcePath, contents) + suite.Require().NoError(err) + + err = suite.StorageDriver.Move(suite.ctx, sourcePath, destPath) + suite.Require().NoError(err) + + received, err := suite.StorageDriver.GetContent(suite.ctx, destPath) + suite.Require().NoError(err) + suite.Require().Equal(contents, received) + + _, err = suite.StorageDriver.GetContent(suite.ctx, sourcePath) + suite.Require().Error(err) + suite.Require().IsType(err, storagedriver.PathNotFoundError{}) + suite.Require().Contains(err.Error(), suite.Name()) +} + +// TestMoveOverwrite checks that a moved object no longer exists at the source +// path and overwrites the contents at the destination. 
+func (suite *DriverSuite) TestMoveOverwrite() { + sourcePath := randomPath(32) + destPath := randomPath(32) + sourceContents := randomContents(32) + destContents := randomContents(64) + + defer suite.deletePath(firstPart(sourcePath)) + defer suite.deletePath(firstPart(destPath)) + + err := suite.StorageDriver.PutContent(suite.ctx, sourcePath, sourceContents) + suite.Require().NoError(err) + + err = suite.StorageDriver.PutContent(suite.ctx, destPath, destContents) + suite.Require().NoError(err) + + err = suite.StorageDriver.Move(suite.ctx, sourcePath, destPath) + suite.Require().NoError(err) + + received, err := suite.StorageDriver.GetContent(suite.ctx, destPath) + suite.Require().NoError(err) + suite.Require().Equal(sourceContents, received) + + _, err = suite.StorageDriver.GetContent(suite.ctx, sourcePath) + suite.Require().Error(err) + suite.Require().IsType(err, storagedriver.PathNotFoundError{}) + suite.Require().Contains(err.Error(), suite.Name()) +} + +// TestMoveNonexistent checks that moving a nonexistent key fails and does not +// delete the data at the destination path. +func (suite *DriverSuite) TestMoveNonexistent() { + contents := randomContents(32) + sourcePath := randomPath(32) + destPath := randomPath(32) + + defer suite.deletePath(firstPart(destPath)) + + err := suite.StorageDriver.PutContent(suite.ctx, destPath, contents) + suite.Require().NoError(err) + + err = suite.StorageDriver.Move(suite.ctx, sourcePath, destPath) + suite.Require().Error(err) + suite.Require().IsType(err, storagedriver.PathNotFoundError{}) + suite.Require().Contains(err.Error(), suite.Name()) + + received, err := suite.StorageDriver.GetContent(suite.ctx, destPath) + suite.Require().NoError(err) + suite.Require().Equal(contents, received) +} + +// TestMoveInvalid provides various checks for invalid moves. +func (suite *DriverSuite) TestMoveInvalid() { + contents := randomContents(32) + + // Create a regular file. + err := suite.StorageDriver.PutContent(suite.ctx, "/notadir", contents) + suite.Require().NoError(err) + defer suite.deletePath("/notadir") + + // Now try to move a non-existent file under it. + err = suite.StorageDriver.Move(suite.ctx, "/notadir/foo", "/notadir/bar") + suite.Require().Error(err) // non-nil error. +} + +// TestDelete checks that the delete operation removes data from the storage +// driver. +func (suite *DriverSuite) TestDelete() { + filename := randomPath(32) + contents := randomContents(32) + + defer suite.deletePath(firstPart(filename)) + + err := suite.StorageDriver.PutContent(suite.ctx, filename, contents) + suite.Require().NoError(err) + + err = suite.StorageDriver.Delete(suite.ctx, filename) + suite.Require().NoError(err) + + _, err = suite.StorageDriver.GetContent(suite.ctx, filename) + suite.Require().Error(err) + suite.Require().IsType(err, storagedriver.PathNotFoundError{}) + suite.Require().Contains(err.Error(), suite.Name()) +} + +// TestRedirectURL checks that the RedirectURL method functions properly, +// but only if it is implemented. 
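+// A driver that does not support direct URLs can opt out by returning an
+// empty URL together with a nil error, which this test treats as
+// "not implemented"; in sketch form:
+//
+//	url, err := driver.RedirectURL(ctx, method, path)
+//	if url == "" && err == nil {
+//		// serve the blob through the registry instead
+//	}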
+func (suite *DriverSuite) TestRedirectURL() { + filename := randomPath(32) + contents := randomContents(32) + + defer suite.deletePath(firstPart(filename)) + + err := suite.StorageDriver.PutContent(suite.ctx, filename, contents) + suite.Require().NoError(err) + + url, err := suite.StorageDriver.RedirectURL(context.TODO(), "", filename) + if url == "" && err == nil { + return + } + client := &http.Client{} + suite.Require().NoError(err) + req, _ := http.NewRequestWithContext(context.TODO(), http.MethodGet, url, nil) + + response, err := client.Do(req) + suite.Require().NoError(err) + defer response.Body.Close() + + read, err := io.ReadAll(response.Body) + suite.Require().NoError(err) + suite.Require().Equal(contents, read) + + url, err = suite.StorageDriver.RedirectURL(context.TODO(), "", filename) + if url == "" && err == nil { + return + } + suite.Require().NoError(err) + req, _ = http.NewRequestWithContext(context.TODO(), http.MethodHead, url, nil) + + response, err = client.Do(req) + suite.Require().NoError(err) + defer response.Body.Close() + suite.Require().Equal(200, response.StatusCode) + suite.Require().Equal(int64(32), response.ContentLength) +} + +// TestDeleteNonexistent checks that removing a nonexistent key fails. +func (suite *DriverSuite) TestDeleteNonexistent() { + filename := randomPath(32) + err := suite.StorageDriver.Delete(suite.ctx, filename) + suite.Require().Error(err) + suite.Require().IsType(err, storagedriver.PathNotFoundError{}) + suite.Require().Contains(err.Error(), suite.Name()) +} + +// TestDeleteFolder checks that deleting a folder removes all child elements. +func (suite *DriverSuite) TestDeleteFolder() { + dirname := randomPath(32) + filename1 := randomPath(32) + filename2 := randomPath(32) + filename3 := randomPath(32) + contents := randomContents(32) + + defer suite.deletePath(firstPart(dirname)) + + err := suite.StorageDriver.PutContent(suite.ctx, path.Join(dirname, filename1), contents) + suite.Require().NoError(err) + + err = suite.StorageDriver.PutContent(suite.ctx, path.Join(dirname, filename2), contents) + suite.Require().NoError(err) + + err = suite.StorageDriver.PutContent(suite.ctx, path.Join(dirname, filename3), contents) + suite.Require().NoError(err) + + err = suite.StorageDriver.Delete(suite.ctx, path.Join(dirname, filename1)) + suite.Require().NoError(err) + + _, err = suite.StorageDriver.GetContent(suite.ctx, path.Join(dirname, filename1)) + suite.Require().Error(err) + suite.Require().IsType(err, storagedriver.PathNotFoundError{}) + suite.Require().Contains(err.Error(), suite.Name()) + + _, err = suite.StorageDriver.GetContent(suite.ctx, path.Join(dirname, filename2)) + suite.Require().NoError(err) + + _, err = suite.StorageDriver.GetContent(suite.ctx, path.Join(dirname, filename3)) + suite.Require().NoError(err) + + err = suite.StorageDriver.Delete(suite.ctx, dirname) + suite.Require().NoError(err) + + _, err = suite.StorageDriver.GetContent(suite.ctx, path.Join(dirname, filename1)) + suite.Require().Error(err) + suite.Require().IsType(err, storagedriver.PathNotFoundError{}) + suite.Require().Contains(err.Error(), suite.Name()) + + _, err = suite.StorageDriver.GetContent(suite.ctx, path.Join(dirname, filename2)) + suite.Require().Error(err) + suite.Require().IsType(err, storagedriver.PathNotFoundError{}) + suite.Require().Contains(err.Error(), suite.Name()) + + _, err = suite.StorageDriver.GetContent(suite.ctx, path.Join(dirname, filename3)) + suite.Require().Error(err) + suite.Require().IsType(err, storagedriver.PathNotFoundError{}) + 
suite.Require().Contains(err.Error(), suite.Name())
+}
+
+// TestDeleteOnlyDeletesSubpaths checks that deleting path A does not
+// delete path B when A is a prefix of B but B is not a subpath of A (so that
+// deleting "/a" does not delete "/ab"). This matters for services like S3 that
+// do not implement directories.
+func (suite *DriverSuite) TestDeleteOnlyDeletesSubpaths() {
+ dirname := randomPath(32)
+ filename := randomPath(32)
+ contents := randomContents(32)
+
+ defer suite.deletePath(firstPart(dirname))
+
+ err := suite.StorageDriver.PutContent(suite.ctx, path.Join(dirname, filename), contents)
+ suite.Require().NoError(err)
+
+ err = suite.StorageDriver.PutContent(suite.ctx, path.Join(dirname, filename+suffix), contents)
+ suite.Require().NoError(err)
+
+ err = suite.StorageDriver.PutContent(suite.ctx, path.Join(dirname, dirname, filename), contents)
+ suite.Require().NoError(err)
+
+ err = suite.StorageDriver.PutContent(suite.ctx, path.Join(dirname, dirname+suffix, filename), contents)
+ suite.Require().NoError(err)
+
+ err = suite.StorageDriver.Delete(suite.ctx, path.Join(dirname, filename))
+ suite.Require().NoError(err)
+
+ _, err = suite.StorageDriver.GetContent(suite.ctx, path.Join(dirname, filename))
+ suite.Require().Error(err)
+ suite.Require().IsType(err, storagedriver.PathNotFoundError{})
+ suite.Require().Contains(err.Error(), suite.Name())
+
+ _, err = suite.StorageDriver.GetContent(suite.ctx, path.Join(dirname, filename+suffix))
+ suite.Require().NoError(err)
+
+ err = suite.StorageDriver.Delete(suite.ctx, path.Join(dirname, dirname))
+ suite.Require().NoError(err)
+
+ _, err = suite.StorageDriver.GetContent(suite.ctx, path.Join(dirname, dirname, filename))
+ suite.Require().Error(err)
+ suite.Require().IsType(err, storagedriver.PathNotFoundError{})
+ suite.Require().Contains(err.Error(), suite.Name())
+
+ _, err = suite.StorageDriver.GetContent(suite.ctx, path.Join(dirname, dirname+suffix, filename))
+ suite.Require().NoError(err)
+}
+
+// TestStatCall verifies the implementation of the storagedriver's Stat call.
+func (suite *DriverSuite) TestStatCall() {
+ content := randomContents(4096)
+ dirPath := randomPath(32)
+ fileName := randomFilename(32)
+ filePath := path.Join(dirPath, fileName)
+
+ defer suite.deletePath(firstPart(dirPath))
+
+ // Call on non-existent file/dir, check error.
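+ // The Stat contract exercised here, in sketch form: a missing path yields
+ // a PathNotFoundError naming the driver, a file reports Path/Size/ModTime
+ // with IsDir() == false, and a directory reports IsDir() == true:
+ //
+ //	fi, err := driver.Stat(ctx, "/no/such/path")
+ //	// err is a PathNotFoundError and fi is nil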
+ fi, err := suite.StorageDriver.Stat(suite.ctx, dirPath)
+ suite.Require().Error(err)
+ suite.Require().IsType(err, storagedriver.PathNotFoundError{})
+ suite.Require().Contains(err.Error(), suite.Name())
+ suite.Require().Nil(fi)
+
+ fi, err = suite.StorageDriver.Stat(suite.ctx, filePath)
+ suite.Require().Error(err)
+ suite.Require().IsType(err, storagedriver.PathNotFoundError{})
+ suite.Require().Contains(err.Error(), suite.Name())
+ suite.Require().Nil(fi)
+
+ err = suite.StorageDriver.PutContent(suite.ctx, filePath, content)
+ suite.Require().NoError(err)
+
+ // Call on regular file, check results
+ fi, err = suite.StorageDriver.Stat(suite.ctx, filePath)
+ suite.Require().NoError(err)
+ suite.Require().NotNil(fi)
+ suite.Require().Equal(filePath, fi.Path())
+ suite.Require().Equal(int64(len(content)), fi.Size())
+ suite.Require().False(fi.IsDir())
+ createdTime := fi.ModTime()
+
+ // Sleep and modify the file
+ time.Sleep(time.Second * 10)
+ content = randomContents(4096)
+ err = suite.StorageDriver.PutContent(suite.ctx, filePath, content)
+ suite.Require().NoError(err)
+ fi, err = suite.StorageDriver.Stat(suite.ctx, filePath)
+ suite.Require().NoError(err)
+ suite.Require().NotNil(fi)
+ time.Sleep(time.Second * 5) // allow changes to propagate (eventual consistency)
+
+ // Check that the modification time is after the creation time.
+ // In case of cloud storage services, storage frontend nodes might have
+ // time drift between them; however, that should be absorbed by sleeping
+ // before the update.
+ modTime := fi.ModTime()
+ if !modTime.After(createdTime) {
+ suite.T().Errorf("modtime (%s) is before the creation time (%s)", modTime, createdTime)
+ }
+
+ // Call on directory (do not check ModTime as dirs don't need to support it)
+ fi, err = suite.StorageDriver.Stat(suite.ctx, dirPath)
+ suite.Require().NoError(err)
+ suite.Require().NotNil(fi)
+ suite.Require().Equal(dirPath, fi.Path())
+ suite.Require().Equal(int64(0), fi.Size())
+ suite.Require().True(fi.IsDir())
+
+ // The storage healthcheck performs this exact call to Stat.
+ // PathNotFoundErrors are not considered health check failures.
+ _, err = suite.StorageDriver.Stat(suite.ctx, "/")
+ // Some drivers will return a not found here, while others will not
+ // return an error at all. If we get an error, ensure it's a not found.
+ if err != nil {
+ suite.Require().IsType(err, storagedriver.PathNotFoundError{})
+ }
+}
+
+// TestPutContentMultipleTimes checks that the storage driver can overwrite
+// content in subsequent puts. It validates that PutContent does not have to
+// honor an offset the way Writer does, and that it overwrites the file
+// entirely rather than writing only to the [0,len(data)) range of the file.
+func (suite *DriverSuite) TestPutContentMultipleTimes() {
+ filename := randomPath(32)
+ contents := randomContents(4096)
+
+ defer suite.deletePath(firstPart(filename))
+ err := suite.StorageDriver.PutContent(suite.ctx, filename, contents)
+ suite.Require().NoError(err)
+
+ contents = randomContents(2048) // upload a different, smaller file
+ err = suite.StorageDriver.PutContent(suite.ctx, filename, contents)
+ suite.Require().NoError(err)
+
+ readContents, err := suite.StorageDriver.GetContent(suite.ctx, filename)
+ suite.Require().NoError(err)
+ suite.Require().Equal(contents, readContents)
+}
+
+// TestConcurrentStreamReads checks that multiple clients can safely read from
+// the same file simultaneously with various offsets.
+func (suite *DriverSuite) TestConcurrentStreamReads() { + var filesize int64 = 128 * 1024 * 1024 + + if testing.Short() { + filesize = 10 * 1024 * 1024 + suite.T().Log("Reducing file size to 10MB for short mode") + } + + filename := randomPath(32) + contents := randomContents(filesize) + + defer suite.deletePath(firstPart(filename)) + + err := suite.StorageDriver.PutContent(suite.ctx, filename, contents) + suite.Require().NoError(err) + + var wg sync.WaitGroup + + readContents := func() { + defer wg.Done() + offset, e := crand.Int(crand.Reader, big.NewInt(int64(len(contents)))) + if e != nil { + log.Warn().Msgf("Error in securing random no: %s", e) + } + reader, err := suite.StorageDriver.Reader(suite.ctx, filename, offset.Int64()) + suite.Require().NoError(err) + + readContents, err := io.ReadAll(reader) + suite.Require().NoError(err) + suite.Require().Equal(contents[offset.Int64():], readContents) + } + + wg.Add(10) + for i := 0; i < 10; i++ { + go readContents() + } + wg.Wait() +} + +// TestConcurrentFileStreams checks that multiple *os.File objects can be passed +// in to Writer concurrently without hanging. +func (suite *DriverSuite) TestConcurrentFileStreams() { + numStreams := 32 + + if testing.Short() { + numStreams = 8 + suite.T().Log("Reducing number of streams to 8 for short mode") + } + + var wg sync.WaitGroup + + testStream := func(size int64) { + defer wg.Done() + suite.testFileStreams(size) + } + + wg.Add(numStreams) + for i := numStreams; i > 0; i-- { + go testStream(int64(numStreams) * 1024 * 1024) + } + + wg.Wait() +} + +type DriverBenchmarkSuite struct { + DriverSuite +} + +func BenchDriver(b *testing.B, driverConstructor DriverConstructor) { + benchsuite := &DriverBenchmarkSuite{ + DriverSuite{ + Constructor: driverConstructor, + ctx: context.Background(), + }, + } + benchsuite.SetupSuite() + b.Cleanup(benchsuite.TearDownSuite) + + b.Run("PutGetEmptyFiles", benchsuite.BenchmarkPutGetEmptyFiles) + b.Run("PutGet1KBFiles", benchsuite.BenchmarkPutGet1KBFiles) + b.Run("PutGet1MBFiles", benchsuite.BenchmarkPutGet1MBFiles) + b.Run("PutGet1GBFiles", benchsuite.BenchmarkPutGet1GBFiles) + b.Run("StreamEmptyFiles", benchsuite.BenchmarkStreamEmptyFiles) + b.Run("Stream1KBFiles", benchsuite.BenchmarkStream1KBFiles) + b.Run("Stream1MBFiles", benchsuite.BenchmarkStream1MBFiles) + b.Run("Stream1GBFiles", benchsuite.BenchmarkStream1GBFiles) + b.Run("List5Files", benchsuite.BenchmarkList5Files) + b.Run("List50Files", benchsuite.BenchmarkList50Files) + b.Run("Delete5Files", benchsuite.BenchmarkDelete5Files) + b.Run("Delete50Files", benchsuite.BenchmarkDelete50Files) +} + +// BenchmarkPutGetEmptyFiles benchmarks PutContent/GetContent for 0B files. +func (s *DriverBenchmarkSuite) BenchmarkPutGetEmptyFiles(b *testing.B) { + s.benchmarkPutGetFiles(b, 0) +} + +// BenchmarkPutGet1KBFiles benchmarks PutContent/GetContent for 1KB files. +func (s *DriverBenchmarkSuite) BenchmarkPutGet1KBFiles(b *testing.B) { + s.benchmarkPutGetFiles(b, 1024) +} + +// BenchmarkPutGet1MBFiles benchmarks PutContent/GetContent for 1MB files. +func (s *DriverBenchmarkSuite) BenchmarkPutGet1MBFiles(b *testing.B) { + s.benchmarkPutGetFiles(b, 1024*1024) +} + +// BenchmarkPutGet1GBFiles benchmarks PutContent/GetContent for 1GB files. 
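+// Like the other sub-benchmarks registered by BenchDriver, it can be run
+// selectively with the standard bench filter; a sketch, assuming a driver
+// package whose Benchmark function calls BenchDriver (the path is illustrative):
+//
+//	go test -run '^$' -bench '/PutGet1GBFiles$' ./registry/app/driver/filesystem/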
+func (s *DriverBenchmarkSuite) BenchmarkPutGet1GBFiles(b *testing.B) { + s.benchmarkPutGetFiles(b, 1024*1024*1024) +} + +func (s *DriverBenchmarkSuite) benchmarkPutGetFiles(b *testing.B, size int64) { + b.SetBytes(size) + parentDir := randomPath(8) + defer func() { + b.StopTimer() + // nolint:errcheck + s.StorageDriver.Delete(s.ctx, firstPart(parentDir)) + }() + + for i := 0; i < b.N; i++ { + filename := path.Join(parentDir, randomPath(32)) + err := s.StorageDriver.PutContent(s.ctx, filename, randomContents(size)) + s.Suite.Require().NoError(err) + + _, err = s.StorageDriver.GetContent(s.ctx, filename) + s.Suite.Require().NoError(err) + } +} + +// BenchmarkStreamEmptyFiles benchmarks Writer/Reader for 0B files. +func (s *DriverBenchmarkSuite) BenchmarkStreamEmptyFiles(b *testing.B) { + s.benchmarkStreamFiles(b, 0) +} + +// BenchmarkStream1KBFiles benchmarks Writer/Reader for 1KB files. +func (s *DriverBenchmarkSuite) BenchmarkStream1KBFiles(b *testing.B) { + s.benchmarkStreamFiles(b, 1024) +} + +// BenchmarkStream1MBFiles benchmarks Writer/Reader for 1MB files. +func (s *DriverBenchmarkSuite) BenchmarkStream1MBFiles(b *testing.B) { + s.benchmarkStreamFiles(b, 1024*1024) +} + +// BenchmarkStream1GBFiles benchmarks Writer/Reader for 1GB files. +func (s *DriverBenchmarkSuite) BenchmarkStream1GBFiles(b *testing.B) { + s.benchmarkStreamFiles(b, 1024*1024*1024) +} + +func (s *DriverBenchmarkSuite) benchmarkStreamFiles(b *testing.B, size int64) { + b.SetBytes(size) + parentDir := randomPath(8) + defer func() { + b.StopTimer() + // nolint:errcheck + s.StorageDriver.Delete(s.ctx, firstPart(parentDir)) + }() + + for i := 0; i < b.N; i++ { + filename := path.Join(parentDir, randomPath(32)) + writer, err := s.StorageDriver.Writer(s.ctx, filename, false) + s.Suite.Require().NoError(err) + written, err := io.Copy(writer, bytes.NewReader(randomContents(size))) + s.Suite.Require().NoError(err) + s.Suite.Require().Equal(size, written) + + err = writer.Commit(context.Background()) + s.Suite.Require().NoError(err) + err = writer.Close() + s.Suite.Require().NoError(err) + + rc, err := s.StorageDriver.Reader(s.ctx, filename, 0) + s.Suite.Require().NoError(err) + rc.Close() + } +} + +// BenchmarkList5Files benchmarks List for 5 small files. +func (s *DriverBenchmarkSuite) BenchmarkList5Files(b *testing.B) { + s.benchmarkListFiles(b, 5) +} + +// BenchmarkList50Files benchmarks List for 50 small files. +func (s *DriverBenchmarkSuite) BenchmarkList50Files(b *testing.B) { + s.benchmarkListFiles(b, 50) +} + +func (s *DriverBenchmarkSuite) benchmarkListFiles(b *testing.B, numFiles int64) { + parentDir := randomPath(8) + defer func() { + b.StopTimer() + // nolint:errcheck + s.StorageDriver.Delete(s.ctx, firstPart(parentDir)) + }() + + for i := int64(0); i < numFiles; i++ { + err := s.StorageDriver.PutContent(s.ctx, path.Join(parentDir, randomPath(32)), nil) + s.Suite.Require().NoError(err) + } + + b.ResetTimer() + for i := 0; i < b.N; i++ { + files, err := s.StorageDriver.List(s.ctx, parentDir) + s.Suite.Require().NoError(err) + s.Suite.Require().Equal(numFiles, int64(len(files))) + } +} + +// BenchmarkDelete5Files benchmarks Delete for 5 small files. +func (s *DriverBenchmarkSuite) BenchmarkDelete5Files(b *testing.B) { + s.benchmarkDeleteFiles(b, 5) +} + +// BenchmarkDelete50Files benchmarks Delete for 50 small files. 
+func (s *DriverBenchmarkSuite) BenchmarkDelete50Files(b *testing.B) { + s.benchmarkDeleteFiles(b, 50) +} + +func (s *DriverBenchmarkSuite) benchmarkDeleteFiles(b *testing.B, numFiles int64) { + for i := 0; i < b.N; i++ { + parentDir := randomPath(8) + defer s.deletePath(firstPart(parentDir)) + + b.StopTimer() + for j := int64(0); j < numFiles; j++ { + err := s.StorageDriver.PutContent(s.ctx, path.Join(parentDir, randomPath(32)), nil) + s.Suite.Require().NoError(err) + } + b.StartTimer() + + // This is the operation we're benchmarking. + err := s.StorageDriver.Delete(s.ctx, firstPart(parentDir)) + s.Suite.Require().NoError(err) + } +} + +func (suite *DriverSuite) testFileStreams(size int64) { + tf, err := os.CreateTemp("", "tf") + suite.Require().NoError(err) + defer os.Remove(tf.Name()) + defer tf.Close() + + filename := randomPath(32) + defer suite.deletePath(firstPart(filename)) + + contents := randomContents(size) + + _, err = tf.Write(contents) + suite.Require().NoError(err) + + err = tf.Sync() + suite.Require().NoError(err) + _, err = tf.Seek(0, io.SeekStart) + suite.Require().NoError(err) + + writer, err := suite.StorageDriver.Writer(suite.ctx, filename, false) + suite.Require().NoError(err) + nn, err := io.Copy(writer, tf) + suite.Require().NoError(err) + suite.Require().Equal(size, nn) + + err = writer.Commit(context.Background()) + suite.Require().NoError(err) + err = writer.Close() + suite.Require().NoError(err) + + reader, err := suite.StorageDriver.Reader(suite.ctx, filename, 0) + suite.Require().NoError(err) + defer reader.Close() + + readContents, err := io.ReadAll(reader) + suite.Require().NoError(err) + + suite.Require().Equal(contents, readContents) +} + +func (suite *DriverSuite) writeReadCompare(filename string, contents []byte) { + defer suite.deletePath(firstPart(filename)) + + err := suite.StorageDriver.PutContent(suite.ctx, filename, contents) + suite.Require().NoError(err) + + readContents, err := suite.StorageDriver.GetContent(suite.ctx, filename) + suite.Require().NoError(err) + + suite.Require().Equal(contents, readContents) +} + +func (suite *DriverSuite) writeReadCompareStreams(filename string, contents []byte) { + defer suite.deletePath(firstPart(filename)) + + writer, err := suite.StorageDriver.Writer(suite.ctx, filename, false) + suite.Require().NoError(err) + nn, err := io.Copy(writer, bytes.NewReader(contents)) + suite.Require().NoError(err) + suite.Require().Equal(int64(len(contents)), nn) + + err = writer.Commit(context.Background()) + suite.Require().NoError(err) + err = writer.Close() + suite.Require().NoError(err) + + reader, err := suite.StorageDriver.Reader(suite.ctx, filename, 0) + suite.Require().NoError(err) + defer reader.Close() + + readContents, err := io.ReadAll(reader) + suite.Require().NoError(err) + + suite.Require().Equal(contents, readContents) +} + +var ( + filenameChars = []byte("abcdefghijklmnopqrstuvwxyz0123456789") + separatorChars = []byte("._-") +) + +func randomPath(length int64) string { + path := "/" + for int64(len(path)) < length { + chunkLength, e := crand.Int(crand.Reader, big.NewInt(int64(len(filenameChars)))) + if e != nil { + log.Warn().Msgf("Error in securing random no: %s", e) + } + chunk := randomFilename(chunkLength.Int64()) + path += chunk + remaining := length - int64(len(path)) + if remaining == 1 { + path += randomFilename(1) + } else if remaining > 1 { + path += "/" + } + } + return path +} + +func randomFilename(length int64) string { + b := make([]byte, length) + wasSeparator := true + for i := range b { + c, 
e := crand.Int(crand.Reader, big.NewInt(int64(4)))
+ if e != nil {
+ log.Warn().Msgf("Error in securing random no: %s", e)
+ }
+ if !wasSeparator && i < len(b)-1 && c.Int64() == 0 {
+ c1, e1 := crand.Int(crand.Reader, big.NewInt(int64(len(separatorChars))))
+ if e1 != nil {
+ log.Warn().Msgf("Error in securing random no: %s", e1)
+ }
+ b[i] = separatorChars[c1.Int64()]
+ wasSeparator = true
+ } else {
+ // Draw from the full filename alphabet, not the separator set.
+ c2, e2 := crand.Int(crand.Reader, big.NewInt(int64(len(filenameChars))))
+ if e2 != nil {
+ log.Warn().Msgf("Error in securing random no: %s", e2)
+ }
+ b[i] = filenameChars[c2.Int64()]
+ wasSeparator = false
+ }
+ }
+ return string(b)
+}
+
+func randomContents(length int64) []byte {
+ return randomBytes[:length]
+}
+
+type randReader struct {
+ r int64
+ m sync.Mutex
+}
+
+func (rr *randReader) Read(p []byte) (n int, err error) {
+ rr.m.Lock()
+ defer rr.m.Unlock()
+
+ toread := int64(len(p))
+ if toread > rr.r {
+ toread = rr.r
+ }
+ n = copy(p, randomContents(toread))
+ rr.r -= int64(n)
+
+ if rr.r <= 0 {
+ err = io.EOF
+ }
+
+ return
+}
+
+func newRandReader(n int64) *randReader {
+ return &randReader{r: n}
+}
+
+func firstPart(filePath string) string {
+ if filePath == "" {
+ return "/"
+ }
+ for {
+ if filePath[len(filePath)-1] == '/' {
+ filePath = filePath[:len(filePath)-1]
+ }
+
+ dir, file := path.Split(filePath)
+ if dir == "" && file == "" {
+ return "/"
+ }
+ if dir == "/" || dir == "" {
+ return "/" + file
+ }
+ if file == "" {
+ return dir
+ }
+ filePath = dir
+ }
+}
diff --git a/registry/app/driver/walk.go b/registry/app/driver/walk.go
new file mode 100644
index 000000000..09ce80b95
--- /dev/null
+++ b/registry/app/driver/walk.go
@@ -0,0 +1,140 @@
+// Source: https://github.com/distribution/distribution

+// Copyright 2014 https://github.com/distribution/distribution Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package driver
+
+import (
+ "context"
+ "errors"
+ "path/filepath"
+ "sort"
+ "strings"
+
+ "github.com/rs/zerolog/log"
+)
+
+// ErrSkipDir is used as a return value from a WalkFn to indicate that
+// the directory named in the call is to be skipped. It is not returned
+// as an error by any function.
+var ErrSkipDir = errors.New("skip this directory")
+
+// ErrFilledBuffer is used as a return value from a WalkFn to indicate
+// that the requested number of entries has been reached and the walk can
+// stop.
+var ErrFilledBuffer = errors.New("we have enough entries")
+
+// WalkFn is called once per file by Walk.
+type WalkFn func(fileInfo FileInfo) error
+
+// WalkFallback traverses a filesystem defined within driver, starting
+// from the given path, calling f on each file. It uses the driver's List and
+// Stat methods to drive itself. If the returned error from the WalkFn is
+// ErrSkipDir the directory will not be entered and Walk will continue the
+// traversal. If the returned error from the WalkFn is ErrFilledBuffer, the
+// walk stops.
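+//
+// A typical WalkFn, in sketch form, collecting at most n file paths:
+//
+//	err := WalkFallback(ctx, driver, "/", func(fi FileInfo) error {
+//		if fi.IsDir() {
+//			return nil // return ErrSkipDir here to prune the subtree
+//		}
+//		paths = append(paths, fi.Path())
+//		if len(paths) == n {
+//			return ErrFilledBuffer // stop the walk early without error
+//		}
+//		return nil
+//	})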
+func WalkFallback(
+ ctx context.Context,
+ driver StorageDriver,
+ from string,
+ f WalkFn,
+ options ...func(*WalkOptions),
+) error {
+ walkOptions := &WalkOptions{}
+ for _, o := range options {
+ o(walkOptions)
+ }
+
+ startAfterHint := walkOptions.StartAfterHint
+ // Check whether the hint is contained within from; filepath.Rel reports a
+ // ".." prefix when it is not. A hint equal to from still counts as
+ // contained.
+ rel, err := filepath.Rel(from, startAfterHint)
+ if err != nil || strings.HasPrefix(rel, "..") {
+ // The startAfterHint is outside from, so check if we even need to walk anything
+ // Replace any path separators with \x00 so that the sort works in a depth-first way
+ if strings.ReplaceAll(startAfterHint, "/", "\x00") < strings.ReplaceAll(from, "/", "\x00") {
+ _, err := doWalkFallback(ctx, driver, from, "", f)
+ return err
+ }
+ return nil
+ }
+ // The startAfterHint is within from.
+ // Walk up the tree until we hit from - we know it is contained.
+ // Ensure startAfterHint is never deeper than a child of the base
+ // directory so that doWalkFallback doesn't have to worry about
+ // depth-first comparisons
+ base := startAfterHint
+ for strings.HasPrefix(base, from) {
+ _, err = doWalkFallback(ctx, driver, base, startAfterHint, f)
+ if err != nil && !errors.As(err, &PathNotFoundError{}) {
+ return err
+ }
+ if base == from {
+ break
+ }
+ startAfterHint = base
+ base, _ = filepath.Split(startAfterHint)
+ if len(base) > 1 {
+ base = strings.TrimSuffix(base, "/")
+ }
+ }
+ return nil
+}
+
+// doWalkFallback performs a depth first walk using recursion.
+// from is the directory that this iteration of the function should walk.
+// startAfterHint is the child within from to start the walk after.
+// It should only ever be a child of from, or the empty string.
+func doWalkFallback(
+ ctx context.Context,
+ driver StorageDriver,
+ from string,
+ startAfterHint string,
+ f WalkFn,
+) (bool, error) {
+ children, err := driver.List(ctx, from)
+ if err != nil {
+ return false, err
+ }
+ sort.Strings(children)
+ for _, child := range children {
+ // The startAfterHint has been sanitised in WalkFallback and will either be
+ // empty, or be suitable for an <= check for this _from_.
+ if child <= startAfterHint {
+ continue
+ }
+
+ fileInfo, err := driver.Stat(ctx, child)
+ if err != nil {
+ if errors.As(err, &PathNotFoundError{}) {
+ // The path was removed between listing and enumeration; skip it,
+ // otherwise f would be called with a nil FileInfo.
+ log.Ctx(ctx).Info().Str("path", child).Msg("ignoring deleted path")
+ continue
+ }
+ return false, err
+ }
+ err = f(fileInfo)
+ switch {
+ case err == nil && fileInfo.IsDir():
+ if ok, err := doWalkFallback(ctx, driver, child, startAfterHint, f); err != nil || !ok {
+ return ok, err
+ }
+ case errors.Is(err, ErrFilledBuffer):
+ return false, nil // no error but stop iteration
+ case err != nil:
+ return false, err
+ }
+ }
+ return true, nil
+}
diff --git a/registry/app/manifest/descriptor.go b/registry/app/manifest/descriptor.go
new file mode 100644
index 000000000..5a450f432
--- /dev/null
+++ b/registry/app/manifest/descriptor.go
@@ -0,0 +1,49 @@
+// Source: https://github.com/distribution/distribution

+// Copyright 2014 https://github.com/distribution/distribution Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package manifest
+
+import (
+ "github.com/opencontainers/go-digest"
+ v1 "github.com/opencontainers/image-spec/specs-go/v1"
+)
+
+// Descriptor describes targeted content. Used in conjunction with a blob
+// store, a descriptor can be used to fetch, store and target any kind of
+// blob. The struct also describes the wire protocol format. Fields should
+// only be added but never changed.
+type Descriptor struct {
+ // MediaType describes the type of the content. All text-based formats are
+ // encoded as utf-8.
+ MediaType string `json:"mediaType,omitempty"`
+
+ // Digest uniquely identifies the content. A byte stream can be verified
+ // against this digest.
+ Digest digest.Digest `json:"digest,omitempty"`
+
+ // Size in bytes of content.
+ Size int64 `json:"size,omitempty"`
+
+ // URLs contains the source URLs of this content.
+ URLs []string `json:"urls,omitempty"`
+
+ // Annotations contains arbitrary metadata relating to the targeted content.
+ Annotations map[string]string `json:"annotations,omitempty"`
+
+ // Platform describes the platform which the image in the manifest runs on.
+ // This should only be used when referring to a manifest.
+ Platform *v1.Platform `json:"platform,omitempty"`
+}
diff --git a/registry/app/manifest/doc.go b/registry/app/manifest/doc.go
new file mode 100644
index 000000000..13b4b41a9
--- /dev/null
+++ b/registry/app/manifest/doc.go
@@ -0,0 +1,17 @@
+// Source: https://github.com/distribution/distribution

+// Copyright 2014 https://github.com/distribution/distribution Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package manifest
diff --git a/registry/app/manifest/errors.go b/registry/app/manifest/errors.go
new file mode 100644
index 000000000..96c805172
--- /dev/null
+++ b/registry/app/manifest/errors.go
@@ -0,0 +1,157 @@
+// Source: https://github.com/distribution/distribution

+// Copyright 2014 https://github.com/distribution/distribution Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package manifest
+
+import (
+ "errors"
+ "fmt"
+ "strings"
+
+ "github.com/opencontainers/go-digest"
+)
+
+// ErrAccessDenied is returned when an access to a requested resource is
+// denied.
+var ErrAccessDenied = errors.New("access denied")
+
+// ErrManifestNotModified is returned when a conditional manifest GetByTag
+// returns nil due to the client indicating it has the latest version.
+var ErrManifestNotModified = errors.New("manifest not modified")
+
+// ErrUnsupported is returned when an unimplemented or unsupported action is
+// performed.
+var ErrUnsupported = errors.New("operation unsupported")
+
+// ErrSchemaV1Unsupported is returned when a client tries to upload a schema v1
+// manifest but the registry is configured to reject it.
+var ErrSchemaV1Unsupported = errors.New("manifest schema v1 unsupported")
+
+// TagUnknownError is returned if the given tag is not known by the tag service.
+type TagUnknownError struct {
+ Tag string
+}
+
+func (err TagUnknownError) Error() string {
+ return fmt.Sprintf("unknown tag=%s", err.Tag)
+}
+
+// RegistryUnknownError is returned if the named repository is not known by
+// the registry.
+type RegistryUnknownError struct {
+ Name string
+}
+
+func (err RegistryUnknownError) Error() string {
+ return fmt.Sprintf("unknown registry name=%s", err.Name)
+}
+
+// RegistryNameInvalidError should be used to denote an invalid registry
+// name. Reason may be set, indicating the cause of invalidity.
+type RegistryNameInvalidError struct {
+ Name string
+ Reason error
+}
+
+func (err RegistryNameInvalidError) Error() string {
+ return fmt.Sprintf("registry name %q invalid: %v", err.Name, err.Reason)
+}
+
+// UnknownError is returned if the manifest is not known by the
+// registry.
+type UnknownError struct {
+ Name string
+ Tag string
+}
+
+func (err UnknownError) Error() string {
+ return fmt.Sprintf("unknown manifest name=%s tag=%s", err.Name, err.Tag)
+}
+
+// UnknownRevisionError is returned when a manifest cannot be found by
+// revision within a registry.
+type UnknownRevisionError struct {
+ Name string
+ Revision digest.Digest
+}
+
+func (err UnknownRevisionError) Error() string {
+ return fmt.Sprintf("unknown manifest name=%s revision=%s", err.Name, err.Revision)
+}
+
+// UnverifiedError is returned when the registry is unable to verify
+// the manifest.
+type UnverifiedError struct{}
+
+func (UnverifiedError) Error() string {
+ return "unverified manifest"
+}
+
+// ReferencesExceedLimitError is returned when a manifest has too many references.
+type ReferencesExceedLimitError struct {
+ References int
+ Limit int
+}
+
+func (err ReferencesExceedLimitError) Error() string {
+ return fmt.Sprintf("%d manifest references exceed reference limit of %d", err.References, err.Limit)
+}
+
+// PayloadSizeExceedsLimitError is returned when a manifest is bigger than the
+// configured payload size limit.
+type PayloadSizeExceedsLimitError struct {
+ PayloadSize int
+ Limit int
+}
+
+// Error implements the error interface for PayloadSizeExceedsLimitError.
+func (err PayloadSizeExceedsLimitError) Error() string {
+ return fmt.Sprintf("manifest payload size of %d exceeds limit of %d", err.PayloadSize, err.Limit)
+}
+
+// VerificationErrors provides a type to collect errors encountered
+// during manifest verification. Currently, it accepts errors of all types,
+// but it may be narrowed to those involving manifest verification.
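+//
+// In sketch form, a verifier can accumulate individual failures and surface
+// them as a single error value (known is a hypothetical lookup):
+//
+//	var errs VerificationErrors
+//	for _, d := range m.References() {
+//		if !known(d.Digest) {
+//			errs = append(errs, BlobUnknownError{Digest: d.Digest})
+//		}
+//	}
+//	if len(errs) > 0 {
+//		return errs
+//	}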
+type VerificationErrors []error
+
+func (errs VerificationErrors) Error() string {
+ parts := make([]string, 0, len(errs))
+ for _, err := range errs {
+ parts = append(parts, err.Error())
+ }
+
+ return fmt.Sprintf("errors verifying manifest: %v", strings.Join(parts, ","))
+}
+
+// BlobUnknownError is returned when a referenced blob cannot be found.
+type BlobUnknownError struct {
+ Digest digest.Digest
+}
+
+func (err BlobUnknownError) Error() string {
+ return fmt.Sprintf("unknown blob %v on manifest", err.Digest)
+}
+
+// NameInvalidError should be used to denote an invalid manifest
+// name. Reason may be set, indicating the cause of invalidity.
+type NameInvalidError struct {
+ Name string
+ Reason error
+}
+
+func (err NameInvalidError) Error() string {
+ return fmt.Sprintf("manifest name %q invalid: %v", err.Name, err.Reason)
+}
diff --git a/registry/app/manifest/manifestlist/manifestlist.go b/registry/app/manifest/manifestlist/manifestlist.go
new file mode 100644
index 000000000..82a710b35
--- /dev/null
+++ b/registry/app/manifest/manifestlist/manifestlist.go
@@ -0,0 +1,217 @@
+// Source: https://github.com/distribution/distribution

+// Copyright 2014 https://github.com/distribution/distribution Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package manifestlist
+
+import (
+ "encoding/json"
+ "errors"
+ "fmt"
+
+ "github.com/harness/gitness/registry/app/manifest"
+
+ "github.com/opencontainers/go-digest"
+ v1 "github.com/opencontainers/image-spec/specs-go/v1"
+)
+
+const (
+ // MediaTypeManifestList specifies the mediaType for manifest lists.
+ MediaTypeManifestList = "application/vnd.docker.distribution.manifest.list.v2+json"
+)
+
+// SchemaVersion provides a pre-initialized version structure for this
+// package's version of the manifest.
+var SchemaVersion = manifest.Versioned{
+ SchemaVersion: 2,
+ MediaType: MediaTypeManifestList,
+}
+
+func init() {
+ manifestListFunc := func(b []byte) (manifest.Manifest, manifest.Descriptor, error) {
+ m := new(DeserializedManifestList)
+ err := m.UnmarshalJSON(b)
+ if err != nil {
+ return nil, manifest.Descriptor{}, err
+ }
+
+ if m.MediaType != MediaTypeManifestList {
+ err = fmt.Errorf(
+ "mediaType in manifest list should be '%s' not '%s'",
+ MediaTypeManifestList, m.MediaType,
+ )
+
+ return nil, manifest.Descriptor{}, err
+ }
+
+ dgst := digest.FromBytes(b)
+ return m, manifest.Descriptor{Digest: dgst, Size: int64(len(b)), MediaType: MediaTypeManifestList}, err
+ }
+ err := manifest.RegisterManifestSchema(MediaTypeManifestList, manifestListFunc)
+ if err != nil {
+ panic(fmt.Sprintf("Unable to register manifest: %s", err))
+ }
+}
+
+// PlatformSpec specifies a platform where a particular image manifest is
+// applicable.
+type PlatformSpec struct {
+ // Architecture field specifies the CPU architecture, for example
+ // `amd64` or `ppc64`.
+ Architecture string `json:"architecture"`
+
+ // OS specifies the operating system, for example `linux` or `windows`.
+ OS string `json:"os"` + + // OSVersion is an optional field specifying the operating system + // version, for example `10.0.10586`. + OSVersion string `json:"os.version,omitempty"` + + // OSFeatures is an optional field specifying an array of strings, + // each listing a required OS feature (for example on Windows `win32k`). + OSFeatures []string `json:"os.features,omitempty"` + + // Variant is an optional field specifying a variant of the CPU, for + // example `ppc64le` to specify a little-endian version of a PowerPC CPU. + Variant string `json:"variant,omitempty"` + + // Features is an optional field specifying an array of strings, each + // listing a required CPU feature (for example `sse4` or `aes`). + Features []string `json:"features,omitempty"` +} + +// A ManifestDescriptor references a platform-specific manifest. +type ManifestDescriptor struct { + manifest.Descriptor + + // Platform specifies which platform the manifest pointed to by the + // descriptor runs on. + Platform PlatformSpec `json:"platform"` +} + +// ManifestList references manifests for various platforms. +type ManifestList struct { + manifest.Versioned + + // Manifests references a list of manifests + Manifests []ManifestDescriptor `json:"manifests"` +} + +// References returns the distribution descriptors for the referenced image +// manifests. +func (m ManifestList) References() []manifest.Descriptor { + dependencies := make([]manifest.Descriptor, len(m.Manifests)) + for i := range m.Manifests { + dependencies[i] = m.Manifests[i].Descriptor + dependencies[i].Platform = &v1.Platform{ + Architecture: m.Manifests[i].Platform.Architecture, + OS: m.Manifests[i].Platform.OS, + OSVersion: m.Manifests[i].Platform.OSVersion, + OSFeatures: m.Manifests[i].Platform.OSFeatures, + Variant: m.Manifests[i].Platform.Variant, + } + } + + return dependencies +} + +// DeserializedManifestList wraps ManifestList with a copy of the original +// JSON. +type DeserializedManifestList struct { + ManifestList + + // canonical is the canonical byte representation of the Manifest. + canonical []byte +} + +// FromDescriptors takes a slice of descriptors, and returns a +// DeserializedManifestList which contains the resulting manifest list +// and its JSON representation. +func FromDescriptors(descriptors []ManifestDescriptor) (*DeserializedManifestList, error) { + return fromDescriptorsWithMediaType(descriptors, MediaTypeManifestList) +} + +// fromDescriptorsWithMediaType is for testing purposes, it's useful to be able to specify the media type explicitly. +func fromDescriptorsWithMediaType( + descriptors []ManifestDescriptor, + mediaType string, +) (*DeserializedManifestList, error) { + m := ManifestList{ + Versioned: manifest.Versioned{ + SchemaVersion: SchemaVersion.SchemaVersion, + MediaType: mediaType, + }, + } + + m.Manifests = make([]ManifestDescriptor, len(descriptors)) + copy(m.Manifests, descriptors) + + deserialized := DeserializedManifestList{ + ManifestList: m, + } + + var err error + deserialized.canonical, err = json.MarshalIndent(&m, "", " ") + return &deserialized, err +} + +// UnmarshalJSON populates a new ManifestList struct from JSON data. 
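+// The raw bytes are retained in canonical so that Payload returns exactly the
+// bytes that were stored and content digests stay stable; in sketch form:
+//
+//	_, payload, _ := m.Payload()
+//	dgst := digest.FromBytes(payload) // matches the digest of the original bytes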
+func (m *DeserializedManifestList) UnmarshalJSON(b []byte) error { + m.canonical = make([]byte, len(b)) + // store manifest list in canonical + copy(m.canonical, b) + + // Unmarshal canonical JSON into ManifestList object + var manifestList ManifestList + if err := json.Unmarshal(m.canonical, &manifestList); err != nil { + return err + } + + m.ManifestList = manifestList + + return nil +} + +// MarshalJSON returns the contents of canonical. If canonical is empty, +// marshals the inner contents. +func (m *DeserializedManifestList) MarshalJSON() ([]byte, error) { + if len(m.canonical) > 0 { + return m.canonical, nil + } + + return nil, errors.New("JSON representation not initialized in DeserializedManifestList") +} + +// Payload returns the raw content of the manifest list. The contents can be +// used to calculate the content identifier. +func (m DeserializedManifestList) Payload() (string, []byte, error) { + return m.MediaType, m.canonical, nil +} + +// validateManifestList returns an error if the byte slice is invalid JSON or if it +// contains fields that belong to a manifest. +func validateManifestList(b []byte) error { + var doc struct { + Config interface{} `json:"config,omitempty"` + Layers interface{} `json:"layers,omitempty"` + } + if err := json.Unmarshal(b, &doc); err != nil { + return err + } + if doc.Config != nil || doc.Layers != nil { + return errors.New("manifestlist: expected list but found manifest") + } + return nil +} diff --git a/registry/app/manifest/manifestlist/manifestlist_test.go b/registry/app/manifest/manifestlist/manifestlist_test.go new file mode 100644 index 000000000..47cbe308b --- /dev/null +++ b/registry/app/manifest/manifestlist/manifestlist_test.go @@ -0,0 +1,236 @@ +// Source: https://github.com/distribution/distribution + +// Copyright 2014 https://github.com/distribution/distribution Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+ +package manifestlist + +import ( + "bytes" + "encoding/json" + "reflect" + "testing" + + "github.com/harness/gitness/registry/app/manifest" + "github.com/harness/gitness/registry/app/manifest/schema2" + + v1 "github.com/opencontainers/image-spec/specs-go/v1" +) + +const expectedManifestListSerialization = `{ + "schemaVersion": 2, + "mediaType": "application/vnd.docker.distribution.manifest.list.v2+json", + "manifests": [ + { + "mediaType": "application/vnd.docker.distribution.manifest.v2+json", + "digest": "sha256:1a9ec845ee94c202b2d5da74a24f0ed2058318bfa9879fa541efaecba272e86b", + "size": 985, + "platform": { + "architecture": "amd64", + "os": "linux", + "features": [ + "sse4" + ] + } + }, + { + "mediaType": "application/vnd.docker.distribution.manifest.v2+json", + "digest": "sha256:6346340964309634683409684360934680934608934608934608934068934608", + "size": 2392, + "platform": { + "architecture": "sun4m", + "os": "sunos" + } + } + ] +}` + +func makeTestManifestList(t *testing.T, mediaType string) ([]ManifestDescriptor, *DeserializedManifestList) { + manifestDescriptors := []ManifestDescriptor{ + { + Descriptor: manifest.Descriptor{ + MediaType: "application/vnd.docker.distribution.manifest.v2+json", + Digest: "sha256:1a9ec845ee94c202b2d5da74a24f0ed2058318bfa9879fa541efaecba272e86b", + Size: 985, + }, + Platform: PlatformSpec{ + Architecture: "amd64", + OS: "linux", + Features: []string{"sse4"}, + }, + }, + { + Descriptor: manifest.Descriptor{ + MediaType: "application/vnd.docker.distribution.manifest.v2+json", + Digest: "sha256:6346340964309634683409684360934680934608934608934608934068934608", + Size: 2392, + }, + Platform: PlatformSpec{ + Architecture: "sun4m", + OS: "sunos", + }, + }, + } + + deserialized, err := fromDescriptorsWithMediaType(manifestDescriptors, mediaType) + if err != nil { + t.Fatalf("error creating DeserializedManifestList: %v", err) + } + + return manifestDescriptors, deserialized +} + +func TestManifestList(t *testing.T) { + manifestDescriptors, deserialized := makeTestManifestList(t, MediaTypeManifestList) + mediaType, canonical, _ := deserialized.Payload() + + if mediaType != MediaTypeManifestList { + t.Fatalf("unexpected media type: %s", mediaType) + } + + // Check that the canonical field is the same as json.MarshalIndent + // with these parameters. + expected, err := json.MarshalIndent(&deserialized.ManifestList, "", " ") + if err != nil { + t.Fatalf("error marshaling manifest list: %v", err) + } + if !bytes.Equal(expected, canonical) { + t.Fatalf("manifest bytes not equal:\nexpected:\n%s\nactual:\n%s\n", string(expected), string(canonical)) + } + + // Check that the canonical field has the expected value. 
+ if !bytes.Equal([]byte(expectedManifestListSerialization), canonical) {
+ t.Fatalf(
+ "manifest bytes not equal:\nexpected:\n%s\nactual:\n%s\n",
+ expectedManifestListSerialization,
+ string(canonical),
+ )
+ }
+
+ var unmarshalled DeserializedManifestList
+ if err := json.Unmarshal(deserialized.canonical, &unmarshalled); err != nil {
+ t.Fatalf("error unmarshaling manifest: %v", err)
+ }
+
+ if !reflect.DeepEqual(&unmarshalled, deserialized) {
+ t.Fatalf("manifests are different after unmarshaling: %v != %v", unmarshalled, *deserialized)
+ }
+
+ references := deserialized.References()
+ if len(references) != 2 {
+ t.Fatalf("unexpected number of references: %d", len(references))
+ }
+ for i := range references {
+ platform := manifestDescriptors[i].Platform
+ expectedPlatform := &v1.Platform{
+ Architecture: platform.Architecture,
+ OS: platform.OS,
+ OSFeatures: platform.OSFeatures,
+ OSVersion: platform.OSVersion,
+ Variant: platform.Variant,
+ }
+ if !reflect.DeepEqual(references[i].Platform, expectedPlatform) {
+ t.Fatalf("unexpected value %d returned by References: %v", i, references[i])
+ }
+ references[i].Platform = nil
+ if !reflect.DeepEqual(references[i], manifestDescriptors[i].Descriptor) {
+ t.Fatalf("unexpected value %d returned by References: %v", i, references[i])
+ }
+ }
+}
+
+func mediaTypeTest(contentType string, mediaType string, shouldError bool) func(*testing.T) {
+ return func(t *testing.T) {
+ _, m := makeTestManifestList(t, mediaType)
+
+ _, canonical, err := m.Payload()
+ if err != nil {
+ t.Fatalf("error getting payload, %v", err)
+ }
+
+ unmarshalled, descriptor, err := manifest.UnmarshalManifest(
+ contentType,
+ canonical,
+ )
+
+ if shouldError {
+ if err == nil {
+ t.Fatalf("bad content type should have produced error")
+ }
+ return
+ }
+ if err != nil {
+ t.Fatalf("error unmarshaling manifest, %v", err)
+ }
+
+ asManifest, ok := unmarshalled.(*DeserializedManifestList)
+ if !ok {
+ t.Fatalf("unmarshalled is not of type *DeserializedManifestList")
+ }
+ if asManifest.MediaType != mediaType {
+ t.Fatalf("Bad media type '%v' as unmarshalled", asManifest.MediaType)
+ }
+
+ if descriptor.MediaType != contentType {
+ t.Fatalf("Bad media type '%v' for descriptor", descriptor.MediaType)
+ }
+
+ unmarshalledMediaType, _, _ := unmarshalled.Payload()
+ if unmarshalledMediaType != contentType {
+ t.Fatalf("Bad media type '%v' for payload", unmarshalledMediaType)
+ }
+ }
+}
+
+func TestMediaTypes(t *testing.T) {
+ t.Run("ManifestList_No_MediaType", mediaTypeTest(MediaTypeManifestList, "", true))
+ t.Run("ManifestList", mediaTypeTest(MediaTypeManifestList, MediaTypeManifestList, false))
+ t.Run("ManifestList_Bad_MediaType", mediaTypeTest(MediaTypeManifestList, MediaTypeManifestList+"XXX", true))
}
+
+func TestValidateManifestList(t *testing.T) {
+ man := schema2.Manifest{
+ Config: manifest.Descriptor{Size: 1},
+ Layers: []manifest.Descriptor{{Size: 2}},
+ }
+ manifestList := ManifestList{
+ Manifests: []ManifestDescriptor{
+ {Descriptor: manifest.Descriptor{Size: 3}},
+ },
+ }
+ t.Run(
+ "valid", func(t *testing.T) {
+ b, err := json.Marshal(manifestList)
+ if err != nil {
+ t.Fatal("unexpected error marshaling manifest list", err)
+ }
+ if err := validateManifestList(b); err != nil {
+ t.Error("list should be valid", err)
+ }
+ },
+ )
+ t.Run(
+ "invalid", func(t *testing.T) {
+ b, err := json.Marshal(man)
+ if err != nil {
+ t.Fatal("unexpected error marshaling manifest", err)
+ }
+ if err := validateManifestList(b); err == nil {
err == nil { + t.Error("manifest should not be valid") + } + }, + ) +} diff --git a/registry/app/manifest/manifests.go b/registry/app/manifest/manifests.go new file mode 100644 index 000000000..984c5ad96 --- /dev/null +++ b/registry/app/manifest/manifests.go @@ -0,0 +1,160 @@ +// Source: https://github.com/distribution/distribution + +// Copyright 2014 https://github.com/distribution/distribution Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package manifest + +import ( + "context" + "fmt" + "mime" + + "github.com/opencontainers/go-digest" +) + +// Manifest represents a registry object specifying a set of +// references and an optional target. +type Manifest interface { + // References returns a list of objects which make up this manifest. + // A reference is anything which can be represented by a + // distribution.Descriptor. These can consist of layers, resources or other + // manifests. + // + // While no particular order is required, implementations should return + // them from highest to lowest priority. For example, one might want to + // return the base layer before the top layer. + References() []Descriptor + + // Payload provides the serialized format of the manifest, in addition to + // the media type. + Payload() (mediaType string, payload []byte, err error) +} + +type ManifestV2 interface { + Manifest + Version() Versioned + Config() Descriptor + Layers() []Descriptor + + // TotalSize is the sum of the size of the manifest payload, layer and config + // blob sizes. + TotalSize() int64 + // DistributableLayers is a slice of distributable image layers. This is a subset of Layers, excluding items with + // media types that are known to identify non-distributable layers. + DistributableLayers() []Descriptor +} + +// ManifestOCI extends ManifestV2 with property descriptions from the +// OCI Image Manifest specification (v1.1). +// https://github.com/opencontainers/image-spec/blob/main/manifest.md#image-manifest-property-descriptions +type ManifestOCI interface { + ManifestV2 + + // This OPTIONAL property contains the type of an artifact when the + // manifest is used for an artifact. + ArtifactType() string + + // This OPTIONAL property specifies a descriptor of another manifest. + // This value, used by the referrers API, indicates a relationship + // to the specified manifest. + Subject() Descriptor + + Annotations() map[string]string +} + +// ManifestBuilder creates a manifest allowing one to include dependencies. +// Instances can be obtained from a version-specific manifest package. Manifest +// specific data is passed into the function which creates the builder. +type ManifestBuilder interface { + // Build creates the manifest from his builder. + Build(ctx context.Context) (Manifest, error) + + // References returns a list of objects which have been added to this + // builder. The dependencies are returned in the order they were added, + // which should be from base to head. 
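+	// (Illustrative example, not from the upstream source: for an image whose
+	// layers are A -> B -> C with A as the base, References should return the
+	// descriptors in the order [A, B, C].)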
+ References() []Descriptor + + // AppendReference includes the given object in the manifest after any + // existing dependencies. If the add fails, such as when adding an + // unsupported dependency, an error may be returned. + // + // The destination of the reference is dependent on the manifest type and + // the dependency type. + AppendReference(dependency Describable) error +} + +// ManifestEnumerator enables iterating over manifests. +type ManifestEnumerator interface { + // Enumerate calls ingester for each manifest. + Enumerate(ctx context.Context, ingester func(digest.Digest) error) error +} + +// Describable is an interface for descriptors. +type Describable interface { + Descriptor() Descriptor +} + +// ManifestMediaTypes returns the supported media types for manifests. +func ManifestMediaTypes() (mediaTypes []string) { + for t := range mappings { + if t != "" { + mediaTypes = append(mediaTypes, t) + } + } + return +} + +// UnmarshalFunc implements manifest unmarshalling a given MediaType. +type UnmarshalFunc func([]byte) (Manifest, Descriptor, error) + +var mappings = make(map[string]UnmarshalFunc) + +// UnmarshalManifest looks up manifest unmarshal functions based on +// MediaType. +func UnmarshalManifest(ctHeader string, p []byte) (Manifest, Descriptor, error) { + // Need to look up by the actual media type, not the raw contents of + // the header. Strip semicolons and anything following them. + var mediaType string + if ctHeader != "" { + var err error + mediaType, _, err = mime.ParseMediaType(ctHeader) + if err != nil { + return nil, Descriptor{}, err + } + } + + unmarshalFunc, ok := mappings[mediaType] + if !ok { + unmarshalFunc, ok = mappings[""] + if !ok { + return nil, Descriptor{}, fmt.Errorf( + "unsupported manifest media type and no default available: %s", + mediaType, + ) + } + } + + return unmarshalFunc(p) +} + +// RegisterManifestSchema registers an UnmarshalFunc for a given schema type. This +// should be called from specific. +func RegisterManifestSchema(mediaType string, u UnmarshalFunc) error { + if _, ok := mappings[mediaType]; ok { + return fmt.Errorf("manifest media type registration would overwrite existing: %s", mediaType) + } + mappings[mediaType] = u + return nil +} diff --git a/registry/app/manifest/ocischema/index.go b/registry/app/manifest/ocischema/index.go new file mode 100644 index 000000000..58d6b78a6 --- /dev/null +++ b/registry/app/manifest/ocischema/index.go @@ -0,0 +1,210 @@ +// Source: https://github.com/distribution/distribution + +// Copyright 2014 https://github.com/distribution/distribution Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package ocischema + +import ( + "encoding/json" + "errors" + "fmt" + + "github.com/harness/gitness/registry/app/manifest" + + "github.com/opencontainers/go-digest" + v1 "github.com/opencontainers/image-spec/specs-go/v1" +) + +// IndexSchemaVersion provides a pre-initialized version structure for OCI Image +// Indices. 
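+//
+// As a hedged illustration (assuming encoding/json and the json tags on
+// manifest.Versioned), marshaling this value yields the two fields every OCI
+// image index begins with:
+//
+//	{"schemaVersion":2,"mediaType":"application/vnd.oci.image.index.v1+json"}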
+var IndexSchemaVersion = manifest.Versioned{ + SchemaVersion: 2, + MediaType: v1.MediaTypeImageIndex, +} + +func init() { + imageIndexFunc := func(b []byte) (manifest.Manifest, manifest.Descriptor, error) { + if err := validateIndex(b); err != nil { + return nil, manifest.Descriptor{}, err + } + m := new(DeserializedImageIndex) + err := m.UnmarshalJSON(b) + if err != nil { + return nil, manifest.Descriptor{}, err + } + + if m.MediaType != "" && m.MediaType != v1.MediaTypeImageIndex { + err = fmt.Errorf( + "if present, mediaType in image index should be '%s' not '%s'", + v1.MediaTypeImageIndex, m.MediaType, + ) + + return nil, manifest.Descriptor{}, err + } + + dgst := digest.FromBytes(b) + return m, manifest.Descriptor{ + MediaType: v1.MediaTypeImageIndex, + Digest: dgst, + Size: int64(len(b)), + Annotations: m.Annotations(), + }, err + } + err := manifest.RegisterManifestSchema(v1.MediaTypeImageIndex, imageIndexFunc) + if err != nil { + panic(fmt.Sprintf("Unable to register OCI Image Index: %s", err)) + } +} + +// ImageIndex references manifests for various platforms. +type ImageIndex struct { + manifest.Versioned + + // This OPTIONAL property contains the type of an artifact when the + // manifest is used for an artifact. This MUST be set when + // config.mediaType is set to the empty value. + ArtifactType string `json:"artifactType,omitempty"` + + // Manifests references a list of manifests + Manifests []manifest.Descriptor `json:"manifests"` + + // Annotations is an optional field that contains arbitrary metadata for the + // image index + Annotations map[string]string `json:"annotations,omitempty"` + + // This OPTIONAL property specifies a descriptor of another manifest. + // This value, used by the referrers API, indicates a relationship to + // the specified manifest. + Subject *manifest.Descriptor `json:"subject,omitempty"` +} + +// References returns the distribution descriptors for the referenced image +// manifests. +func (ii ImageIndex) References() []manifest.Descriptor { + return ii.Manifests +} + +// DeserializedImageIndex wraps ManifestList with a copy of the original +// JSON. +type DeserializedImageIndex struct { + ImageIndex + + // canonical is the canonical byte representation of the Manifest. + canonical []byte +} + +// FromDescriptors takes a slice of descriptors and a map of annotations, and +// returns a DeserializedManifestList which contains the resulting manifest list +// and its JSON representation. If annotations is nil or empty then the +// annotations property will be omitted from the JSON representation. +func FromDescriptors(descriptors []manifest.Descriptor, annotations map[string]string) ( + *DeserializedImageIndex, error, +) { + return fromDescriptorsWithMediaType(descriptors, annotations, v1.MediaTypeImageIndex) +} + +// fromDescriptorsWithMediaType is for testing purposes, +// it's useful to be able to specify the media type explicitly. 
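+//
+// A sketch of such a test call (hypothetical values):
+//
+//	idx, err := fromDescriptorsWithMediaType(
+//		descriptors, nil, "application/vnd.docker.distribution.manifest.list.v2+json",
+//	)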
+func fromDescriptorsWithMediaType( + descriptors []manifest.Descriptor, + annotations map[string]string, + mediaType string, +) (_ *DeserializedImageIndex, err error) { + m := ImageIndex{ + Versioned: manifest.Versioned{ + SchemaVersion: IndexSchemaVersion.SchemaVersion, + MediaType: mediaType, + }, + Annotations: annotations, + } + + m.Manifests = make([]manifest.Descriptor, len(descriptors)) + copy(m.Manifests, descriptors) + + deserialized := DeserializedImageIndex{ + ImageIndex: m, + } + + deserialized.canonical, err = json.MarshalIndent(&m, "", " ") + return &deserialized, err +} + +// UnmarshalJSON populates a new ManifestList struct from JSON data. +func (m *DeserializedImageIndex) UnmarshalJSON(b []byte) error { + m.canonical = make([]byte, len(b)) + // store manifest list in canonical + copy(m.canonical, b) + + // Unmarshal canonical JSON into ManifestList object + var manifestList ImageIndex + if err := json.Unmarshal(m.canonical, &manifestList); err != nil { + return err + } + + m.ImageIndex = manifestList + + return nil +} + +// MarshalJSON returns the contents of canonical. If canonical is empty, +// marshals the inner contents. +func (m *DeserializedImageIndex) MarshalJSON() ([]byte, error) { + if len(m.canonical) > 0 { + return m.canonical, nil + } + + return nil, errors.New("JSON representation not initialized in DeserializedImageIndex") +} + +// Payload returns the raw content of the manifest list. The contents can be +// used to calculate the content identifier. +func (m DeserializedImageIndex) Payload() (string, []byte, error) { + mediaType := m.MediaType + if m.MediaType == "" { + mediaType = v1.MediaTypeImageIndex + } + + return mediaType, m.canonical, nil +} + +// validateIndex returns an error if the byte slice is invalid JSON or if it +// contains fields that belong to a manifest. +func validateIndex(b []byte) error { + var doc struct { + Config interface{} `json:"config,omitempty"` + Layers interface{} `json:"layers,omitempty"` + } + if err := json.Unmarshal(b, &doc); err != nil { + return err + } + if doc.Config != nil || doc.Layers != nil { + return errors.New("index: expected index but found manifest") + } + return nil +} +func (m *DeserializedImageIndex) ArtifactType() string { return m.ImageIndex.ArtifactType } +func (m *DeserializedImageIndex) Subject() manifest.Descriptor { + if m.ImageIndex.Subject == nil { + return manifest.Descriptor{} + } + return *m.ImageIndex.Subject +} + +func (m *DeserializedImageIndex) Annotations() map[string]string { + if m.ImageIndex.Annotations == nil { + return map[string]string{} + } + return m.ImageIndex.Annotations +} diff --git a/registry/app/manifest/ocischema/manifest.go b/registry/app/manifest/ocischema/manifest.go new file mode 100644 index 000000000..b75c522f5 --- /dev/null +++ b/registry/app/manifest/ocischema/manifest.go @@ -0,0 +1,220 @@ +// Source: https://github.com/distribution/distribution + +// Copyright 2014 https://github.com/distribution/distribution Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+// See the License for the specific language governing permissions and +// limitations under the License. + +package ocischema + +import ( + "encoding/json" + "errors" + "fmt" + + "github.com/harness/gitness/registry/app/manifest" + + "github.com/opencontainers/go-digest" + v1 "github.com/opencontainers/image-spec/specs-go/v1" +) + +// SchemaVersion provides a pre-initialized version structure for OCI Image +// Manifests. +var SchemaVersion = manifest.Versioned{ + SchemaVersion: 2, + MediaType: v1.MediaTypeImageManifest, +} + +func init() { + ocischemaFunc := func(b []byte) (manifest.Manifest, manifest.Descriptor, error) { + if err := validateManifest(b); err != nil { + return nil, manifest.Descriptor{}, err + } + m := new(DeserializedManifest) + err := m.UnmarshalJSON(b) + if err != nil { + return nil, manifest.Descriptor{}, err + } + + dgst := digest.FromBytes(b) + return m, manifest.Descriptor{ + MediaType: v1.MediaTypeImageManifest, + Digest: dgst, + Size: int64(len(b)), + Annotations: m.Annotations(), + }, err + } + err := manifest.RegisterManifestSchema(v1.MediaTypeImageManifest, ocischemaFunc) + if err != nil { + panic(fmt.Sprintf("Unable to register manifest: %s", err)) + } +} + +// Manifest defines a ocischema manifest. +type Manifest struct { + manifest.Versioned + + // This OPTIONAL property contains the type of an artifact when the + // manifest is used for an artifact. This MUST be set when + // config.mediaType is set to the empty value. + ArtifactType string `json:"artifactType,omitempty"` + + // Config references the image configuration as a blob. + Config manifest.Descriptor `json:"config"` + + // Layers lists descriptors for the layers referenced by the + // configuration. + Layers []manifest.Descriptor `json:"layers"` + + // This OPTIONAL property specifies a descriptor of another manifest. + // This value, used by the referrers API, indicates a relationship to + // the specified manifest. + Subject *manifest.Descriptor `json:"subject,omitempty"` + + // Annotations contains arbitrary metadata for the image manifest. + Annotations map[string]string `json:"annotations,omitempty"` +} + +// References returns the descriptors of this manifests references. +func (m Manifest) References() []manifest.Descriptor { + references := make([]manifest.Descriptor, 0, 1+len(m.Layers)) + references = append(references, m.Config) + references = append(references, m.Layers...) + return references +} + +// Target returns the target of this manifest. +func (m Manifest) Target() manifest.Descriptor { + return m.Config +} + +// DeserializedManifest wraps Manifest with a copy of the original JSON. +// It satisfies the distribution.Manifest interface. +type DeserializedManifest struct { + Manifest + + // canonical is the canonical byte representation of the Manifest. + canonical []byte +} + +// FromStruct takes a Manifest structure, marshals it to JSON, and returns a +// DeserializedManifest which contains the manifest and its JSON representation. +func FromStruct(m Manifest) (*DeserializedManifest, error) { + var deserialized DeserializedManifest + deserialized.Manifest = m + + var err error + deserialized.canonical, err = json.MarshalIndent(&m, "", " ") + return &deserialized, err +} + +// UnmarshalJSON populates a new Manifest struct from JSON data. 
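+//
+// Usage sketch (illustrative): the raw bytes are retained verbatim, so a
+// round trip through Payload returns exactly the input:
+//
+//	var m DeserializedManifest
+//	if err := m.UnmarshalJSON(raw); err != nil { /* handle error */ }
+//	_, canonical, _ := m.Payload() // bytes.Equal(canonical, raw) == true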
+func (m *DeserializedManifest) UnmarshalJSON(b []byte) error {
+	m.canonical = make([]byte, len(b))
+	// store manifest in canonical
+	copy(m.canonical, b)
+
+	// Unmarshal canonical JSON into Manifest object
+	var mfst Manifest
+	if err := json.Unmarshal(m.canonical, &mfst); err != nil {
+		return err
+	}
+
+	if mfst.MediaType != "" && mfst.MediaType != v1.MediaTypeImageManifest {
+		return fmt.Errorf(
+			"if present, mediaType in manifest should be '%s' not '%s'",
+			v1.MediaTypeImageManifest, mfst.MediaType,
+		)
+	}
+
+	m.Manifest = mfst
+
+	return nil
+}
+
+// MarshalJSON returns the contents of canonical. If canonical is empty,
+// an error is returned.
+func (m *DeserializedManifest) MarshalJSON() ([]byte, error) {
+	if len(m.canonical) > 0 {
+		return m.canonical, nil
+	}
+
+	return nil, errors.New("JSON representation not initialized in DeserializedManifest")
+}
+
+// Payload returns the raw content of the manifest. The contents can be used to
+// calculate the content identifier.
+func (m DeserializedManifest) Payload() (string, []byte, error) {
+	return v1.MediaTypeImageManifest, m.canonical, nil
+}
+
+// validateManifest returns an error if the byte slice is invalid JSON or if it
+// contains fields that belong to an index.
+func validateManifest(b []byte) error {
+	var doc struct {
+		Manifests interface{} `json:"manifests,omitempty"`
+	}
+	if err := json.Unmarshal(b, &doc); err != nil {
+		return err
+	}
+	if doc.Manifests != nil {
+		return errors.New("ocimanifest: expected manifest but found index")
+	}
+	return nil
+}
+
+func (m *DeserializedManifest) Version() manifest.Versioned {
+	// Media type can be either Docker (`application/vnd.docker.distribution.manifest.v2+json`) or OCI (empty).
+	// We need to make it explicit if empty, otherwise we're not able to distinguish between media types.
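+	// Illustrative: an OCI manifest pushed without the optional mediaType
+	// field unmarshals with an empty Versioned.MediaType; normalizing it here
+	// lets callers compare Version().MediaType against
+	// v1.MediaTypeImageManifest without special-casing the empty value.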
+ if m.Versioned.MediaType == "" { + m.Versioned.MediaType = v1.MediaTypeImageManifest + } + + return m.Versioned +} + +func (m *DeserializedManifest) Config() manifest.Descriptor { return m.Target() } +func (m *DeserializedManifest) Layers() []manifest.Descriptor { return m.Manifest.Layers } +func (m *DeserializedManifest) DistributableLayers() []manifest.Descriptor { + var ll []manifest.Descriptor + for _, l := range m.Layers() { + switch l.MediaType { + case v1.MediaTypeImageLayerNonDistributable, v1.MediaTypeImageLayerNonDistributableGzip: + continue + } + ll = append(ll, l) + } + return ll +} +func (m *DeserializedManifest) ArtifactType() string { return m.Manifest.ArtifactType } +func (m *DeserializedManifest) Subject() manifest.Descriptor { + if m.Manifest.Subject == nil { + return manifest.Descriptor{} + } + return *m.Manifest.Subject +} + +func (m *DeserializedManifest) Annotations() map[string]string { + if m.Manifest.Annotations == nil { + return map[string]string{} + } + return m.Manifest.Annotations +} + +func (m *DeserializedManifest) TotalSize() int64 { + var layersSize int64 + for _, layer := range m.Layers() { + layersSize += layer.Size + } + + return layersSize + m.Config().Size + int64(len(m.canonical)) +} diff --git a/registry/app/manifest/schema2/manifest.go b/registry/app/manifest/schema2/manifest.go new file mode 100644 index 000000000..1fd534765 --- /dev/null +++ b/registry/app/manifest/schema2/manifest.go @@ -0,0 +1,181 @@ +// Source: https://github.com/distribution/distribution + +// Copyright 2014 https://github.com/distribution/distribution Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package schema2 + +import ( + "encoding/json" + "errors" + "fmt" + + "github.com/harness/gitness/registry/app/manifest" + + "github.com/opencontainers/go-digest" +) + +const ( + // MediaTypeManifest specifies the mediaType for the current version. + MediaTypeManifest = "application/vnd.docker.distribution.manifest.v2+json" + + // MediaTypeImageConfig specifies the mediaType for the image configuration. + MediaTypeImageConfig = "application/vnd.docker.container.image.v1+json" + + // MediaTypePluginConfig specifies the mediaType for plugin configuration. + MediaTypePluginConfig = "application/vnd.docker.plugin.v1+json" + + // MediaTypeLayer is the mediaType used for layers referenced by the + // manifest. + MediaTypeLayer = "application/vnd.docker.image.rootfs.diff.tar.gzip" + + // MediaTypeForeignLayer is the mediaType used for layers that must be + // downloaded from foreign URLs. + MediaTypeForeignLayer = "application/vnd.docker.image.rootfs.foreign.diff.tar.gzip" + + // MediaTypeUncompressedLayer is the mediaType used for layers which + // are not compressed. + MediaTypeUncompressedLayer = "application/vnd.docker.image.rootfs.diff.tar" +) + +// SchemaVersion provides a pre-initialized version structure for this +// packages version of the manifest. 
+var SchemaVersion = manifest.Versioned{ + SchemaVersion: 2, + MediaType: MediaTypeManifest, +} + +func init() { + schema2Func := func(b []byte) (manifest.Manifest, manifest.Descriptor, error) { + m := new(DeserializedManifest) + err := m.UnmarshalJSON(b) + if err != nil { + return nil, manifest.Descriptor{}, err + } + + dgst := digest.FromBytes(b) + return m, manifest.Descriptor{Digest: dgst, Size: int64(len(b)), MediaType: MediaTypeManifest}, err + } + err := manifest.RegisterManifestSchema(MediaTypeManifest, schema2Func) + if err != nil { + panic(fmt.Sprintf("Unable to register manifest: %s", err)) + } +} + +// Manifest defines a schema2 manifest. +type Manifest struct { + manifest.Versioned + + // Config references the image configuration as a blob. + Config manifest.Descriptor `json:"config"` + + // Layers lists descriptors for the layers referenced by the + // configuration. + Layers []manifest.Descriptor `json:"layers"` +} + +// References returns the descriptors of this manifests references. +func (m Manifest) References() []manifest.Descriptor { + references := make([]manifest.Descriptor, 0, 1+len(m.Layers)) + references = append(references, m.Config) + references = append(references, m.Layers...) + return references +} + +// Target returns the target of this manifest. +func (m Manifest) Target() manifest.Descriptor { + return m.Config +} + +// DeserializedManifest wraps Manifest with a copy of the original JSON. +// It satisfies the distribution.Manifest interface. +type DeserializedManifest struct { + Manifest + + // canonical is the canonical byte representation of the Manifest. + canonical []byte +} + +// FromStruct takes a Manifest structure, marshals it to JSON, and returns a +// DeserializedManifest which contains the manifest and its JSON representation. +func FromStruct(m Manifest) (*DeserializedManifest, error) { + var deserialized DeserializedManifest + deserialized.Manifest = m + + var err error + deserialized.canonical, err = json.MarshalIndent(&m, "", " ") + return &deserialized, err +} + +// UnmarshalJSON populates a new Manifest struct from JSON data. +func (m *DeserializedManifest) UnmarshalJSON(b []byte) error { + m.canonical = make([]byte, len(b)) + // store manifest in canonical + copy(m.canonical, b) + + // Unmarshal canonical JSON into Manifest object + var mfst Manifest + if err := json.Unmarshal(m.canonical, &mfst); err != nil { + return err + } + + if mfst.MediaType != MediaTypeManifest { + return fmt.Errorf( + "mediaType in manifest should be '%s' not '%s'", + MediaTypeManifest, mfst.MediaType, + ) + } + + m.Manifest = mfst + + return nil +} + +// MarshalJSON returns the contents of canonical. If canonical is empty, +// marshals the inner contents. 
+func (m *DeserializedManifest) MarshalJSON() ([]byte, error) { + if len(m.canonical) > 0 { + return m.canonical, nil + } + + return nil, errors.New("JSON representation not initialized in DeserializedManifest") +} + +func (m *DeserializedManifest) Version() manifest.Versioned { return m.Versioned } +func (m *DeserializedManifest) Config() manifest.Descriptor { return m.Target() } +func (m *DeserializedManifest) Layers() []manifest.Descriptor { return m.Manifest.Layers } +func (m *DeserializedManifest) DistributableLayers() []manifest.Descriptor { + var ll []manifest.Descriptor + for _, l := range m.Layers() { + if l.MediaType != MediaTypeForeignLayer { + ll = append(ll, l) + } + } + return ll +} + +func (m *DeserializedManifest) TotalSize() int64 { + var layersSize int64 + for _, layer := range m.Layers() { + layersSize += layer.Size + } + + return layersSize + m.Config().Size + int64(len(m.canonical)) +} + +// Payload returns the raw content of the manifest. The contents can be used to +// calculate the content identifier. +func (m DeserializedManifest) Payload() (string, []byte, error) { + return m.MediaType, m.canonical, nil +} diff --git a/registry/app/manifest/schema2/manifest_test.go b/registry/app/manifest/schema2/manifest_test.go new file mode 100644 index 000000000..161694bf3 --- /dev/null +++ b/registry/app/manifest/schema2/manifest_test.go @@ -0,0 +1,186 @@ +// Source: https://github.com/distribution/distribution + +// Copyright 2014 https://github.com/distribution/distribution Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+ +package schema2 + +import ( + "bytes" + "encoding/json" + "reflect" + "testing" + + "github.com/harness/gitness/registry/app/manifest" +) + +const expectedManifestSerialization = `{ + "schemaVersion": 2, + "mediaType": "application/vnd.docker.distribution.manifest.v2+json", + "config": { + "mediaType": "application/vnd.docker.container.image.v1+json", + "digest": "sha256:1a9ec845ee94c202b2d5da74a24f0ed2058318bfa9879fa541efaecba272e86b", + "size": 985 + }, + "layers": [ + { + "mediaType": "application/vnd.docker.image.rootfs.diff.tar.gzip", + "digest": "sha256:62d8908bee94c202b2d35224a221aaa2058318bfa9879fa541efaecba272331b", + "size": 153263 + } + ] +}` + +func makeTestManifest(mediaType string) Manifest { + return Manifest{ + Versioned: manifest.Versioned{ + SchemaVersion: 2, + MediaType: mediaType, + }, + Config: manifest.Descriptor{ + MediaType: MediaTypeImageConfig, + Digest: "sha256:1a9ec845ee94c202b2d5da74a24f0ed2058318bfa9879fa541efaecba272e86b", + Size: 985, + }, + Layers: []manifest.Descriptor{ + { + MediaType: MediaTypeLayer, + Digest: "sha256:62d8908bee94c202b2d35224a221aaa2058318bfa9879fa541efaecba272331b", + Size: 153263, + }, + }, + } +} + +func TestManifest(t *testing.T) { + mfst := makeTestManifest(MediaTypeManifest) + + deserialized, err := FromStruct(mfst) + if err != nil { + t.Fatalf("error creating DeserializedManifest: %v", err) + } + + mediaType, canonical, _ := deserialized.Payload() + + if mediaType != MediaTypeManifest { + t.Fatalf("unexpected media type: %s", mediaType) + } + + // Check that the canonical field is the same as json.MarshalIndent + // with these parameters. + expected, err := json.MarshalIndent(&mfst, "", " ") + if err != nil { + t.Fatalf("error marshaling manifest: %v", err) + } + if !bytes.Equal(expected, canonical) { + t.Fatalf("manifest bytes not equal:\nexpected:\n%s\nactual:\n%s\n", string(expected), string(canonical)) + } + + // Check that canonical field matches expected value. 
+	if !bytes.Equal([]byte(expectedManifestSerialization), canonical) {
+		t.Fatalf(
+			"manifest bytes not equal:\nexpected:\n%s\nactual:\n%s\n",
+			expectedManifestSerialization,
+			string(canonical),
+		)
+	}
+
+	var unmarshalled DeserializedManifest
+	if err := json.Unmarshal(deserialized.canonical, &unmarshalled); err != nil {
+		t.Fatalf("error unmarshaling manifest: %v", err)
+	}
+
+	if !reflect.DeepEqual(&unmarshalled, deserialized) {
+		t.Fatalf("manifests are different after unmarshaling: %v != %v", unmarshalled, *deserialized)
+	}
+
+	target := deserialized.Target()
+	if target.Digest != "sha256:1a9ec845ee94c202b2d5da74a24f0ed2058318bfa9879fa541efaecba272e86b" {
+		t.Fatalf("unexpected digest in target: %s", target.Digest.String())
+	}
+	if target.MediaType != MediaTypeImageConfig {
+		t.Fatalf("unexpected media type in target: %s", target.MediaType)
+	}
+	if target.Size != 985 {
+		t.Fatalf("unexpected size in target: %d", target.Size)
+	}
+
+	references := deserialized.References()
+	if len(references) != 2 {
+		t.Fatalf("unexpected number of references: %d", len(references))
+	}
+
+	if !reflect.DeepEqual(references[0], target) {
+		t.Fatalf("first reference should be target: %v != %v", references[0], target)
+	}
+
+	// Test the second reference
+	if references[1].Digest != "sha256:62d8908bee94c202b2d35224a221aaa2058318bfa9879fa541efaecba272331b" {
+		t.Fatalf("unexpected digest in reference: %s", references[1].Digest.String())
+	}
+	if references[1].MediaType != MediaTypeLayer {
+		t.Fatalf("unexpected media type in reference: %s", references[1].MediaType)
+	}
+	if references[1].Size != 153263 {
+		t.Fatalf("unexpected size in reference: %d", references[1].Size)
+	}
+}
+
+func mediaTypeTest(t *testing.T, mediaType string, shouldError bool) {
+	mfst := makeTestManifest(mediaType)
+
+	deserialized, err := FromStruct(mfst)
+	if err != nil {
+		t.Fatalf("error creating DeserializedManifest: %v", err)
+	}
+
+	unmarshalled, descriptor, err := manifest.UnmarshalManifest(
+		MediaTypeManifest,
+		deserialized.canonical,
+	)
+
+	if shouldError {
+		if err == nil {
+			t.Fatalf("bad content type should have produced error")
+		}
+		return
+	}
+	if err != nil {
+		t.Fatalf("error unmarshaling manifest, %v", err)
+	}
+
+	asManifest, ok := unmarshalled.(*DeserializedManifest)
+	if !ok {
+		t.Fatalf("Error: unmarshalled is not of type *DeserializedManifest")
+		return
+	}
+	if asManifest.MediaType != mediaType {
+		t.Fatalf("Bad media type '%v' as unmarshalled", asManifest.MediaType)
+	}
+
+	if descriptor.MediaType != MediaTypeManifest {
+		t.Fatalf("Bad media type '%v' for descriptor", descriptor.MediaType)
+	}
+
+	unmarshalledMediaType, _, _ := unmarshalled.Payload()
+	if unmarshalledMediaType != MediaTypeManifest {
+		t.Fatalf("Bad media type '%v' for payload", unmarshalledMediaType)
+	}
+}
+
+func TestMediaTypes(t *testing.T) {
+	mediaTypeTest(t, "", true)
+	mediaTypeTest(t, MediaTypeManifest, false)
+	mediaTypeTest(t, MediaTypeManifest+"XXX", true)
+}
diff --git a/registry/app/manifest/versioned.go b/registry/app/manifest/versioned.go
new file mode 100644
index 000000000..0cb1e705c
--- /dev/null
+++ b/registry/app/manifest/versioned.go
@@ -0,0 +1,28 @@
+// Source: https://github.com/distribution/distribution

+// Copyright 2014 https://github.com/distribution/distribution Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package manifest + +// Versioned provides a struct with the manifest schemaVersion and mediaType. +// Incoming content with unknown schema version can be decoded against this +// struct to check the version. +type Versioned struct { + // SchemaVersion is the image manifest schema that this image follows + SchemaVersion int `json:"schemaVersion"` + + // MediaType is the media type of this schema. + MediaType string `json:"mediaType,omitempty"` +} diff --git a/registry/app/pkg/artifact.go b/registry/app/pkg/artifact.go new file mode 100644 index 000000000..f94d4f5ad --- /dev/null +++ b/registry/app/pkg/artifact.go @@ -0,0 +1,19 @@ +// Copyright 2023 Harness, Inc. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package pkg + +type Artifact interface { + GetArtifactType() string +} diff --git a/registry/app/pkg/commons/request.go b/registry/app/pkg/commons/request.go new file mode 100644 index 000000000..7759934df --- /dev/null +++ b/registry/app/pkg/commons/request.go @@ -0,0 +1,91 @@ +// Copyright 2023 Harness, Inc. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+ +package commons + +import ( + "net/http" + "reflect" +) + +const ( + HeaderAccept = "Accept" + HeaderAuthorization = "Authorization" + HeaderCacheControl = "Cache-Control" + HeaderContentLength = "Content-Length" + HeaderContentRange = "Content-Range" + HeaderContentType = "Content-Type" + HeaderDockerContentDigest = "Docker-Content-Digest" + HeaderDockerUploadUUID = "Docker-Upload-UUID" + HeaderEtag = "Etag" + HeaderIfNoneMatch = "If-None-Match" + HeaderLink = "Link" + HeaderLocation = "Location" + HeaderOCIFiltersApplied = "OCI-Filters-Applied" + HeaderOCISubject = "OCI-Subject" + HeaderRange = "Range" +) + +type ResponseHeaders struct { + Headers map[string]string + Code int +} + +func IsEmpty(slice interface{}) bool { + if slice == nil { + return true + } + val := reflect.ValueOf(slice) + + // Check if the input is a pointer + if val.Kind() == reflect.Ptr { + // Dereference the pointer + val = val.Elem() + } + + // Check if the dereferenced value is nil + if !val.IsValid() { + return true + } + + return val.Len() == 0 +} + +func (r *ResponseHeaders) WriteToResponse(w http.ResponseWriter) { + if w == nil || r == nil { + return + } + + if r.Headers != nil { + for key, value := range r.Headers { + w.Header().Set(key, value) + } + } + + if r.Code != 0 { + w.WriteHeader(r.Code) + } +} + +func (r *ResponseHeaders) WriteHeadersToResponse(w http.ResponseWriter) { + if w == nil || r == nil { + return + } + + if r.Headers != nil { + for key, value := range r.Headers { + w.Header().Set(key, value) + } + } +} diff --git a/registry/app/pkg/context.go b/registry/app/pkg/context.go new file mode 100644 index 000000000..a3820b3ab --- /dev/null +++ b/registry/app/pkg/context.go @@ -0,0 +1,48 @@ +// Copyright 2023 Harness, Inc. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package pkg + +import ( + v2 "github.com/distribution/distribution/v3/registry/api/v2" +) + +type BaseInfo struct { + ParentID int64 + RootIdentifier string + RootParentID int64 +} + +type ArtifactInfo struct { + *BaseInfo + RegIdentifier string + Image string +} + +type RegistryInfo struct { + *ArtifactInfo + Reference string + Digest string + Tag string + URLBuilder *v2.URLBuilder + Path string +} + +func (r *RegistryInfo) SetReference(ref string) { + r.Reference = ref +} + +func (a *ArtifactInfo) SetRepoKey(key string) { + a.RegIdentifier = key +} diff --git a/registry/app/pkg/core_controller.go b/registry/app/pkg/core_controller.go new file mode 100644 index 000000000..3927a78c8 --- /dev/null +++ b/registry/app/pkg/core_controller.go @@ -0,0 +1,83 @@ +// Copyright 2023 Harness, Inc. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. 
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package pkg + +import ( + "context" + + "github.com/harness/gitness/registry/app/api/openapi/contracts/artifact" + store2 "github.com/harness/gitness/registry/app/store" + "github.com/harness/gitness/registry/types" + + "github.com/rs/zerolog/log" +) + +type ArtifactType int + +const ( + LocalRegistry ArtifactType = 1 << iota + RemoteRegistry +) + +var TypeRegistry = map[ArtifactType]Artifact{} + +type CoreController struct { + RegistryDao store2.RegistryRepository +} + +func NewCoreController(registryDao store2.RegistryRepository) *CoreController { + return &CoreController{ + RegistryDao: registryDao, + } +} + +func (c *CoreController) factory(t ArtifactType) Artifact { + switch t { + case LocalRegistry: + return TypeRegistry[t] + case RemoteRegistry: + return TypeRegistry[t] + default: + log.Error().Stack().Msgf("Invalid artifact type %v", t) + return nil + } +} + +func (c *CoreController) GetArtifact(registry types.Registry) Artifact { + if string(registry.Type) == string(artifact.RegistryTypeVIRTUAL) { + return c.factory(LocalRegistry) + } + return c.factory(RemoteRegistry) +} + +func (c *CoreController) GetOrderedRepos( + ctx context.Context, + repoKey string, + artInfo RegistryInfo, +) ([]types.Registry, error) { + var result []types.Registry + if registry, err := c.RegistryDao.GetByParentIDAndName(ctx, artInfo.ParentID, repoKey); err == nil { + result = append(result, *registry) + proxies := registry.UpstreamProxies + if len(proxies) > 0 { + upstreamRepos, _ := c.RegistryDao.GetByIDIn(ctx, artInfo.ParentID, proxies) + result = append(result, *upstreamRepos...) + } + } else { + return result, err + } + + return result, nil +} diff --git a/registry/app/pkg/docker/app.go b/registry/app/pkg/docker/app.go new file mode 100644 index 000000000..3b96d1b7d --- /dev/null +++ b/registry/app/pkg/docker/app.go @@ -0,0 +1,128 @@ +// Source: https://github.com/distribution/distribution + +// Copyright 2014 https://github.com/distribution/distribution Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+
+package docker
+
+import (
+	"context"
+	"crypto/rand"
+	"fmt"
+
+	corestore "github.com/harness/gitness/app/store"
+	"github.com/harness/gitness/registry/app/dist_temp/dcontext"
+	"github.com/harness/gitness/registry/app/dist_temp/errcode"
+	storagedriver "github.com/harness/gitness/registry/app/driver"
+	"github.com/harness/gitness/registry/app/pkg"
+	registrystorage "github.com/harness/gitness/registry/app/storage"
+	"github.com/harness/gitness/registry/app/store"
+	"github.com/harness/gitness/registry/gc"
+	"github.com/harness/gitness/types"
+
+	"github.com/jmoiron/sqlx"
+	"github.com/opencontainers/go-digest"
+	"github.com/rs/zerolog/log"
+)
+
+// randomSecretSize is the number of random bytes to generate if no secret
+// was specified.
+const randomSecretSize = 32
+
+// App is a global registry application object. Shared resources can be placed
+// on this object that will be accessible from all requests. Any writable
+// fields should be protected.
+type App struct {
+	context.Context
+
+	Config         *types.Config
+	storageService *registrystorage.Service
+}
+
+// NewApp takes a configuration and returns a configured app.
+func NewApp(
+	ctx context.Context, sqlDB *sqlx.DB, storageDeleter storagedriver.StorageDeleter,
+	blobRepo store.BlobRepository, spaceStore corestore.SpaceStore,
+	cfg *types.Config, storageService *registrystorage.Service,
+	mtRepository store.MediaTypesRepository, manifestRepository store.ManifestRepository,
+	gcService gc.Service,
+) *App {
+	app := &App{
+		Context:        ctx,
+		Config:         cfg,
+		storageService: storageService,
+	}
+	app.configureSecret(cfg)
+	gcService.Start(ctx, sqlDB, spaceStore, blobRepo, storageDeleter, cfg, mtRepository, manifestRepository)
+	return app
+}
+
+func GetStorageService(cfg *types.Config, driver storagedriver.StorageDriver) *registrystorage.Service {
+	options := registrystorage.GetRegistryOptions()
+	if cfg.Registry.Storage.S3Storage.Delete {
+		options = append(options, registrystorage.EnableDelete)
+	}
+
+	if cfg.Registry.Storage.S3Storage.Redirect {
+		options = append(options, registrystorage.EnableRedirect)
+	} else {
+		log.Info().Msg("backend redirection disabled")
+	}
+
+	storageService, err := registrystorage.NewStorageService(driver, options...)
+	if err != nil {
+		panic("could not create storage service: " + err.Error())
+	}
+	return storageService
+}
+
+func LogError(errList errcode.Errors) {
+	for _, e1 := range errList {
+		log.Error().Err(e1).Msgf("error: %v", e1)
+	}
+}
+
+// configureSecret creates a random secret if a secret wasn't included in the
+// configuration.
+func (app *App) configureSecret(configuration *types.Config) {
+	if configuration.Registry.HTTP.Secret == "" {
+		var secretBytes [randomSecretSize]byte
+		if _, err := rand.Read(secretBytes[:]); err != nil {
+			panic(fmt.Sprintf("could not generate random bytes for HTTP secret: %v", err))
+		}
+		configuration.Registry.HTTP.Secret = string(secretBytes[:])
+		dcontext.GetLogger(app, log.Warn()).
+			Msg(
+				"No HTTP secret provided - generated random secret. This may cause problems with uploads if" +
+					" multiple registries are behind a load-balancer. To provide a shared secret," +
+					" set the GITNESS_REGISTRY_HTTP_SECRET environment variable.",
+			)
+	}
+}
+
+// GetBlobsContext constructs the blob-handling context object for a request.
+// This should only be called once per request.
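+//
+// Caller sketch (hypothetical handler code):
+//
+//	blobCtx := app.GetBlobsContext(r.Context(), info)
+//	// blobCtx.OciBlobStore now serves blob reads/writes for info.RegIdentifier.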
+func (app *App) GetBlobsContext(c context.Context, info pkg.RegistryInfo) *Context { + context := &Context{ + App: app, + Context: c, + UUID: info.Reference, + Digest: digest.Digest(info.Digest), + } + context.URLBuilder = info.URLBuilder + blobStore := app.storageService.OciBlobsStore(c, info.RegIdentifier, info.RootIdentifier) + context.OciBlobStore = blobStore + + return context +} diff --git a/registry/app/pkg/docker/catalog.go b/registry/app/pkg/docker/catalog.go new file mode 100644 index 000000000..761c36b85 --- /dev/null +++ b/registry/app/pkg/docker/catalog.go @@ -0,0 +1,129 @@ +// Source: https://gitlab.com/gitlab-org/container-registry + +// Copyright 2019 Gitlab Inc. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package docker + +import ( + "encoding/base64" + "fmt" + "net/url" + "strconv" + + repostore "github.com/harness/gitness/registry/app/store/database" + "github.com/harness/gitness/registry/types" +) + +const ( + linkPrevious = "previous" + linkNext = "next" + encodingSeparator = "|" + NQueryParamKey = "n" + PublishedAtQueryParamKey = "published_at" + BeforeQueryParamKey = "before" + TagNameQueryParamKey = "name" + SortQueryParamKey = "sort" + LastQueryParamKey = "last" +) + +// Use the original URL from the request to create a new URL for +// the link header. +func CreateLinkEntry( + origURL string, + filters types.FilterParams, + publishedBefore, publishedLast string, +) (string, error) { + var combinedURL string + + if filters.BeforeEntry != "" { + beforeURL, err := generateLink(origURL, linkPrevious, filters, publishedBefore, publishedLast) + if err != nil { + return "", err + } + combinedURL = beforeURL + } + + if filters.LastEntry != "" { + lastURL, err := generateLink(origURL, linkNext, filters, publishedBefore, publishedLast) + if err != nil { + return "", err + } + + if filters.BeforeEntry == "" { + combinedURL = lastURL + } else { + // Put the "previous" URL first and then "next" as shown in the + // RFC5988 examples https://datatracker.ietf.org/doc/html/rfc5988#section-5.5. 
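+			// Illustrative combined value (hypothetical URLs):
+			//   </v2/_catalog?before=b&n=20>; rel="previous", </v2/_catalog?last=y&n=20>; rel="next"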
+			combinedURL = fmt.Sprintf("%s, %s", combinedURL, lastURL)
+		}
+	}
+
+	return combinedURL, nil
+}
+
+func generateLink(
+	originalURL, rel string,
+	filters types.FilterParams,
+	publishedBefore, publishedLast string,
+) (string, error) {
+	calledURL, err := url.Parse(originalURL)
+	if err != nil {
+		return "", err
+	}
+
+	qValues := url.Values{}
+	qValues.Add(NQueryParamKey, strconv.Itoa(filters.MaxEntries))
+
+	switch rel {
+	case linkPrevious:
+		before := filters.BeforeEntry
+		if filters.OrderBy == PublishedAtQueryParamKey && publishedBefore != "" {
+			before = EncodeFilter(publishedBefore, filters.BeforeEntry)
+		}
+		qValues.Add(BeforeQueryParamKey, before)
+	case linkNext:
+		last := filters.LastEntry
+		if filters.OrderBy == PublishedAtQueryParamKey && publishedLast != "" {
+			last = EncodeFilter(publishedLast, filters.LastEntry)
+		}
+		qValues.Add(LastQueryParamKey, last)
+	}
+
+	if filters.Name != "" {
+		qValues.Add(TagNameQueryParamKey, filters.Name)
+	}
+
+	orderBy := filters.OrderBy
+	if orderBy != "" {
+		if filters.SortOrder == repostore.OrderDesc {
+			orderBy = "-" + orderBy
+		}
+		qValues.Add(SortQueryParamKey, orderBy)
+	}
+
+	calledURL.RawQuery = qValues.Encode()
+
+	calledURL.Fragment = ""
+	urlStr := fmt.Sprintf("<%s>; rel=\"%s\"", calledURL.String(), rel)
+
+	return urlStr, nil
+}
+
+// EncodeFilter base64-encodes the published_at value concatenated with the
+// tagName, separated by encodingSeparator.
+func EncodeFilter(publishedAt, tagName string) (v string) {
+	return base64.StdEncoding.EncodeToString(
+		[]byte(fmt.Sprintf("%s%s%s", publishedAt, encodingSeparator, tagName)),
+	)
+}
diff --git a/registry/app/pkg/docker/compat.go b/registry/app/pkg/docker/compat.go
new file mode 100644
index 000000000..13f458ed4
--- /dev/null
+++ b/registry/app/pkg/docker/compat.go
@@ -0,0 +1,135 @@
+// Source: https://gitlab.com/gitlab-org/container-registry

+// Copyright 2019 Gitlab Inc.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package docker
+
+import (
+	"errors"
+	"fmt"
+
+	"github.com/harness/gitness/registry/app/manifest"
+	"github.com/harness/gitness/registry/app/manifest/manifestlist"
+	"github.com/harness/gitness/registry/app/manifest/ocischema"
+	"github.com/harness/gitness/registry/app/manifest/schema2"
+
+	v1 "github.com/opencontainers/image-spec/specs-go/v1"
+)
+
+const (
+	// MediaTypeManifest specifies the mediaType for the current version. Note
+	// that for schema version 1, the media type is optionally "application/json".
+	MediaTypeManifest = "application/vnd.docker.distribution.manifest.v1+json"
+	// MediaTypeSignedManifest specifies the mediatype for current SignedManifest version.
+	MediaTypeSignedManifest = "application/vnd.docker.distribution.manifest.v1+prettyjws"
+)
+
+// MediaTypeBuildxCacheConfig is the mediatype associated with buildx
+// cache config blobs. This should be unique to buildx.
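+//
+// For illustration (hypothetical digest and size), such a reference appears
+// in a manifest list as:
+//
+//	{"mediaType":"application/vnd.buildkit.cacheconfig.v0","digest":"sha256:...","size":1234}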
+var MediaTypeBuildxCacheConfig = "application/vnd.buildkit.cacheconfig.v0" + +// SplitReferences contains two lists of manifest list references broken down +// into either blobs or manifests. The result of appending these two lists +// together should include all of the descriptors returned by +// ManifestList.References with no duplicates, additions, or omissions. +type SplitReferences struct { + Manifests []manifest.Descriptor + Blobs []manifest.Descriptor +} + +// References returns the references of the DeserializedManifestList split into +// manifests and layers based on the mediatype of the standard list of +// descriptors. Only known manifest mediatypes will be sorted into the manifests +// array while everything else will be sorted into blobs. Helm chart manifests +// do not include a mediatype at the time of this commit, but they are unlikely +// to be included within a manifest list. +func References(ml *manifestlist.DeserializedManifestList) SplitReferences { + var ( + manifests = make([]manifest.Descriptor, 0) + blobs = make([]manifest.Descriptor, 0) + ) + + for _, r := range ml.References() { + switch r.MediaType { + case schema2.MediaTypeManifest, + manifestlist.MediaTypeManifestList, + v1.MediaTypeImageManifest, + MediaTypeSignedManifest, + MediaTypeManifest: + + manifests = append(manifests, r) + default: + blobs = append(blobs, r) + } + } + + return SplitReferences{Manifests: manifests, Blobs: blobs} +} + +// LikelyBuildxCache returns true if the manifest list is likely a buildx cache +// manifest based on the unique buildx config mediatype. +func LikelyBuildxCache(ml *manifestlist.DeserializedManifestList) bool { + blobs := References(ml).Blobs + + for _, desc := range blobs { + if desc.MediaType == MediaTypeBuildxCacheConfig { + return true + } + } + + return false +} + +// ContainsBlobs returns true if the manifest list contains any blobs. +func ContainsBlobs(ml *manifestlist.DeserializedManifestList) bool { + return len(References(ml).Blobs) > 0 +} + +func OCIManifestFromBuildkitIndex(ml *manifestlist.DeserializedManifestList) (*ocischema.DeserializedManifest, error) { + refs := References(ml) + if len(refs.Manifests) > 0 { + return nil, errors.New("buildkit index has unexpected manifest references") + } + + // set "config" and "layer" references apart. + var cfg *manifest.Descriptor + var layers []manifest.Descriptor + for _, ref := range refs.Blobs { + refCopy := ref + if refCopy.MediaType == MediaTypeBuildxCacheConfig { + cfg = &refCopy + } else { + layers = append(layers, refCopy) + } + } + + // make sure they were found. + if cfg == nil { + return nil, errors.New("buildkit index has no config reference") + } + + m, err := ocischema.FromStruct( + ocischema.Manifest{ + Versioned: ocischema.SchemaVersion, + Config: *cfg, + Layers: layers, + }, + ) + if err != nil { + return nil, fmt.Errorf("building manifest from buildkit index: %w", err) + } + + return m, nil +} diff --git a/registry/app/pkg/docker/context.go b/registry/app/pkg/docker/context.go new file mode 100644 index 000000000..a3fd1470e --- /dev/null +++ b/registry/app/pkg/docker/context.go @@ -0,0 +1,60 @@ +// Source: https://github.com/distribution/distribution + +// Copyright 2014 https://github.com/distribution/distribution Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. 
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package docker
+
+import (
+	"context"
+
+	"github.com/harness/gitness/registry/app/storage"
+
+	v2 "github.com/distribution/distribution/v3/registry/api/v2"
+	"github.com/opencontainers/go-digest"
+)
+
+// Context should contain the request specific context for use across
+// handlers. Resources that don't need to be shared across handlers should not
+// be on this object.
+type Context struct {
+	*App
+	context.Context
+	URLBuilder   *v2.URLBuilder
+	OciBlobStore storage.OciBlobStore
+	Upload       storage.BlobWriter
+	UUID         string
+	Digest       digest.Digest
+	State        BlobUploadState
+}
+
+// Value overrides context.Context.Value to ensure that calls are routed to
+// the correct context.
+func (ctx *Context) Value(key interface{}) interface{} {
+	return ctx.Context.Value(key)
+}
+
+// BlobUploadState captures the serializable state of the blob upload.
+type BlobUploadState struct {
+	// name is the primary repository under which the blob will be linked.
+	Name string
+
+	Path string
+
+	// UUID identifies the upload.
+	UUID string
+
+	// offset contains the current progress of the upload.
+	Offset int64
+}
diff --git a/registry/app/pkg/docker/controller.go b/registry/app/pkg/docker/controller.go
new file mode 100644
index 000000000..37d6d0ef3
--- /dev/null
+++ b/registry/app/pkg/docker/controller.go
@@ -0,0 +1,481 @@
+// Source: https://gitlab.com/gitlab-org/container-registry

+// Copyright 2019 Gitlab Inc.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ +package docker + +import ( + "context" + "crypto/hmac" + "crypto/sha256" + "encoding/base64" + "encoding/json" + "errors" + "fmt" + "io" + "net/http" + "reflect" + + "github.com/harness/gitness/app/auth/authz" + corestore "github.com/harness/gitness/app/store" + "github.com/harness/gitness/registry/app/dist_temp/errcode" + "github.com/harness/gitness/registry/app/pkg" + "github.com/harness/gitness/registry/app/pkg/commons" + "github.com/harness/gitness/registry/app/storage" + registrytypes "github.com/harness/gitness/registry/types" + "github.com/harness/gitness/types/enum" + + v1 "github.com/opencontainers/image-spec/specs-go/v1" + "github.com/rs/zerolog/log" +) + +type Controller struct { + *pkg.CoreController + local *LocalRegistry + remote *RemoteRegistry + spaceStore corestore.SpaceStore + authorizer authz.Authorizer +} + +type TagsAPIResponse struct { + Name string `json:"name"` + Tags []string `json:"tags"` +} + +var _ pkg.Artifact = (*LocalRegistry)(nil) +var _ pkg.Artifact = (*RemoteRegistry)(nil) + +func NewController( + local *LocalRegistry, + remote *RemoteRegistry, + coreController *pkg.CoreController, + spaceStore corestore.SpaceStore, + authorizer authz.Authorizer, +) *Controller { + c := &Controller{ + CoreController: coreController, + local: local, + remote: remote, + spaceStore: spaceStore, + authorizer: authorizer, + } + + pkg.TypeRegistry[pkg.LocalRegistry] = local + pkg.TypeRegistry[pkg.RemoteRegistry] = remote + return c +} + +func isEmpty(slice interface{}) bool { + if slice == nil { + return true + } + return reflect.ValueOf(slice).Len() == 0 +} + +func (c *Controller) ProxyWrapper( + ctx context.Context, + f func(registry registrytypes.Registry, imageName string, artInfo pkg.Artifact) Response, + info pkg.RegistryInfo, +) Response { + none := pkg.RegistryInfo{} + if info == none { + log.Ctx(ctx).Error().Stack().Msg("artifactinfo is not found") + return nil + } + + var response Response + requestRepoKey := info.RegIdentifier + imageName := info.Image + if repos, err := c.GetOrderedRepos(ctx, requestRepoKey, info); err == nil { + for _, registry := range repos { + log.Ctx(ctx).Info().Msgf("Using Repository: %s, Type: %s", registry.Name, registry.Type) + artifact, ok := c.GetArtifact(registry).(Registry) + if !ok { + log.Ctx(ctx).Warn().Msgf("artifact %s is not a registry", registry.Name) + continue + } + if artifact != nil { + response = f(registry, imageName, artifact) + if isEmpty(response.GetErrors()) { + return response + } + } + } + } + return response +} + +func (c *Controller) HeadManifest( + ctx context.Context, + art pkg.RegistryInfo, + acceptHeaders []string, + ifNoneMatchHeader []string, +) Response { + err := GetRegistryCheckAccess( + ctx, c.RegistryDao, c.authorizer, c.spaceStore, art.RegIdentifier, art.ParentID, enum.PermissionArtifactsDownload, + ) + if err != nil { + return &GetManifestResponse{ + Errors: []error{errcode.ErrCodeDenied}, + } + } + + f := func(registry registrytypes.Registry, _ string, a pkg.Artifact) Response { + art.SetRepoKey(registry.Name) + headers, desc, man, e := a.(Registry).ManifestExist(ctx, art, acceptHeaders, ifNoneMatchHeader) + response := &GetManifestResponse{e, headers, desc, man} + return response + } + + result := c.ProxyWrapper(ctx, f, art) + return result +} + +func (c *Controller) PullManifest( + ctx context.Context, + art pkg.RegistryInfo, + acceptHeaders []string, + ifNoneMatchHeader []string, +) Response { + err := GetRegistryCheckAccess( + ctx, c.RegistryDao, c.authorizer, c.spaceStore, art.RegIdentifier, 
art.ParentID, enum.PermissionArtifactsDownload, + ) + if err != nil { + return &GetManifestResponse{ + Errors: []error{errcode.ErrCodeDenied}, + } + } + f := func(registry registrytypes.Registry, _ string, a pkg.Artifact) Response { + art.SetRepoKey(registry.Name) + headers, desc, man, e := a.(Registry).PullManifest(ctx, art, acceptHeaders, ifNoneMatchHeader) + response := &GetManifestResponse{e, headers, desc, man} + return response + } + + result := c.ProxyWrapper(ctx, f, art) + return result +} + +func (c *Controller) PutManifest( + ctx context.Context, + artInfo pkg.RegistryInfo, + mediaType string, + body io.ReadCloser, + length int64, +) (responseHeaders *commons.ResponseHeaders, errs []error) { + err := GetRegistryCheckAccess( + ctx, c.RegistryDao, c.authorizer, c.spaceStore, artInfo.RegIdentifier, + artInfo.ParentID, enum.PermissionArtifactsUpload, enum.PermissionArtifactsDownload, + ) + if err != nil { + return nil, []error{errcode.ErrCodeDenied} + } + return c.local.PutManifest(ctx, artInfo, mediaType, body, length) +} + +func (c *Controller) DeleteManifest( + ctx context.Context, + artInfo pkg.RegistryInfo, +) (errs []error, responseHeaders *commons.ResponseHeaders) { + err := GetRegistryCheckAccess( + ctx, c.RegistryDao, c.authorizer, c.spaceStore, artInfo.RegIdentifier, artInfo.ParentID, + enum.PermissionArtifactsDelete, + ) + if err != nil { + return []error{errcode.ErrCodeDenied}, nil + } + return c.local.DeleteManifest(ctx, artInfo) +} + +func (c *Controller) HeadBlob( + ctx context.Context, + info pkg.RegistryInfo, +) ( + responseHeaders *commons.ResponseHeaders, fr *storage.FileReader, size int64, readCloser io.ReadCloser, + redirectURL string, errs []error, +) { + err := GetRegistryCheckAccess( + ctx, c.RegistryDao, c.authorizer, c.spaceStore, info.RegIdentifier, info.ParentID, enum.PermissionArtifactsDownload, + ) + if err != nil { + return nil, nil, 0, nil, "", []error{errcode.ErrCodeDenied} + } + return c.local.HeadBlob(ctx, info) +} + +func (c *Controller) GetBlob(ctx context.Context, info pkg.RegistryInfo) Response { + err := GetRegistryCheckAccess( + ctx, c.RegistryDao, c.authorizer, c.spaceStore, info.RegIdentifier, info.ParentID, enum.PermissionArtifactsDownload, + ) + if err != nil { + return &GetBlobResponse{ + Errors: []error{errcode.ErrCodeDenied}, + } + } + f := func(registry registrytypes.Registry, _ string, a pkg.Artifact) Response { + info.SetRepoKey(registry.Name) + headers, body, size, readCloser, redirectURL, errs := a.(Registry).GetBlob(ctx, info) + return &GetBlobResponse{errs, headers, body, size, readCloser, redirectURL} + } + + return c.ProxyWrapper(ctx, f, info) +} + +func (c *Controller) InitiateUploadBlob( + ctx context.Context, + info pkg.RegistryInfo, + fromRepo string, + mountDigest string, +) (*commons.ResponseHeaders, []error) { + err := GetRegistryCheckAccess( + ctx, c.RegistryDao, c.authorizer, c.spaceStore, info.RegIdentifier, info.ParentID, enum.PermissionArtifactsUpload, + enum.PermissionArtifactsDownload, + ) + if err != nil { + return nil, []error{errcode.ErrCodeDenied} + } + return c.local.InitBlobUpload(ctx, info, fromRepo, mountDigest) +} + +func (c *Controller) GetUploadBlobStatus( + ctx context.Context, + info pkg.RegistryInfo, + token string, +) (responseHeaders *commons.ResponseHeaders, errs []error) { + err := GetRegistryCheckAccess( + ctx, c.RegistryDao, c.authorizer, c.spaceStore, info.RegIdentifier, info.ParentID, enum.PermissionArtifactsDownload, + ) + if err != nil { + return nil, []error{errcode.ErrCodeDenied} + } + 
blobCtx := c.local.App.GetBlobsContext(ctx, info) + return c.local.GetBlobUploadStatus(blobCtx, info, token) +} + +func (c *Controller) PatchBlobUpload( + ctx context.Context, + info pkg.RegistryInfo, + ct string, + cr string, + cl string, + length int64, + token string, + body io.ReadCloser, +) (responseHeaders *commons.ResponseHeaders, errors []error) { + blobCtx := c.local.App.GetBlobsContext(ctx, info) + err := GetRegistryCheckAccess( + ctx, c.RegistryDao, c.authorizer, c.spaceStore, info.RegIdentifier, info.ParentID, enum.PermissionArtifactsDownload, + enum.PermissionArtifactsUpload, + ) + if err != nil { + return nil, []error{errcode.ErrCodeDenied} + } + errors = make([]error, 0) + if blobCtx.UUID != "" { + errs := ResumeBlobUpload(blobCtx, token) + errors = append(errors, errs...) + } + if blobCtx.Upload != nil { + defer blobCtx.Upload.Close() + } + + rs, errs := c.local.PushBlobChunk(blobCtx, info, ct, cr, cl, body, length) + if !commons.IsEmpty(errs) { + errors = append(errors, errs...) + } + return rs, errors +} + +func (c *Controller) CompleteBlobUpload( + ctx context.Context, + info pkg.RegistryInfo, + body io.ReadCloser, + length int64, + stateToken string, +) (responseHeaders *commons.ResponseHeaders, errs []error) { + err := GetRegistryCheckAccess( + ctx, c.RegistryDao, c.authorizer, c.spaceStore, info.RegIdentifier, info.ParentID, enum.PermissionArtifactsUpload, + enum.PermissionArtifactsDownload, + ) + if err != nil { + return nil, []error{errcode.ErrCodeDenied} + } + return c.local.PushBlob(ctx, info, body, length, stateToken) +} + +func (c *Controller) CancelBlobUpload( + ctx context.Context, + info pkg.RegistryInfo, + stateToken string, +) (responseHeaders *commons.ResponseHeaders, errors []error) { + err := GetRegistryCheckAccess( + ctx, c.RegistryDao, c.authorizer, c.spaceStore, info.RegIdentifier, info.ParentID, enum.PermissionArtifactsDelete, + ) + if err != nil { + return nil, []error{errcode.ErrCodeDenied} + } + + blobCtx := c.local.App.GetBlobsContext(ctx, info) + + errors = make([]error, 0) + + if blobCtx.UUID != "" { + errs := ResumeBlobUpload(blobCtx, stateToken) + errors = append(errors, errs...) 
+ } + + if blobCtx.Upload == nil { + e := errcode.ErrCodeBlobUploadUnknown + errors = append(errors, e) + return responseHeaders, errors + } + defer blobCtx.Upload.Close() + + responseHeaders = &commons.ResponseHeaders{ + Headers: map[string]string{"Docker-Upload-UUID": blobCtx.UUID}, + } + + if err := blobCtx.Upload.Cancel(blobCtx); err != nil { + log.Ctx(ctx).Error().Stack().Err(err).Msgf("error encountered canceling upload: %v", err) + errors = append(errors, errcode.ErrCodeUnknown.WithDetail(err)) + } + + responseHeaders.Code = http.StatusNoContent + return responseHeaders, errors +} + +func (c *Controller) DeleteBlob( + ctx context.Context, + info pkg.RegistryInfo, +) (responseHeaders *commons.ResponseHeaders, errs []error) { + err := GetRegistryCheckAccess( + ctx, c.RegistryDao, c.authorizer, c.spaceStore, info.RegIdentifier, info.ParentID, enum.PermissionArtifactsDelete, + ) + if err != nil { + return nil, []error{errcode.ErrCodeDenied} + } + blobCtx := c.local.App.GetBlobsContext(ctx, info) + return c.local.DeleteBlob(blobCtx, info) +} + +func (c *Controller) GetTags( + ctx context.Context, + lastEntry string, + maxEntries int, + origURL string, + artInfo pkg.RegistryInfo, +) (*commons.ResponseHeaders, []string, error) { + err := GetRegistryCheckAccess( + ctx, c.RegistryDao, c.authorizer, c.spaceStore, artInfo.RegIdentifier, + artInfo.ParentID, enum.PermissionArtifactsDownload, + ) + if err != nil { + return nil, nil, errcode.ErrCodeDenied + } + return c.local.ListTags(ctx, lastEntry, maxEntries, origURL, artInfo) +} + +func (c *Controller) GetCatalog(_ http.ResponseWriter, _ *http.Request) { + log.Info().Msgf("Not implemented yet!") +} + +func (c *Controller) GetReferrers( + ctx context.Context, + artInfo pkg.RegistryInfo, + artifactType string, +) (index *v1.Index, responseHeaders *commons.ResponseHeaders, err error) { + accessErr := GetRegistryCheckAccess( + ctx, c.RegistryDao, c.authorizer, c.spaceStore, artInfo.RegIdentifier, + artInfo.ParentID, enum.PermissionArtifactsDownload, + ) + if accessErr != nil { + return nil, nil, errcode.ErrCodeDenied + } + return c.local.ListReferrers(ctx, artInfo, artifactType) +} + +func ResumeBlobUpload(ctx *Context, stateToken string) []error { + var errs []error + state, err := hmacKey(ctx.App.Config.Registry.HTTP.Secret).unpackUploadState(stateToken) + if err != nil { + log.Ctx(ctx).Info().Msgf("error resolving upload: %v", err) + errs = append(errs, errcode.ErrCodeBlobUploadInvalid.WithDetail(err)) + return errs + } + ctx.State = state + + if state.Path != ctx.OciBlobStore.Path() { + log.Ctx(ctx).Info().Msgf("mismatched path in upload state: %q != %q", state.Path, ctx.OciBlobStore.Path()) + errs = append(errs, errcode.ErrCodeBlobUploadInvalid.WithDetail(err)) + return errs + } + + if state.UUID != ctx.UUID { + log.Ctx(ctx).Info().Msgf("mismatched uuid in upload state: %q != %q", state.UUID, ctx.UUID) + errs = append(errs, errcode.ErrCodeBlobUploadInvalid.WithDetail(err)) + return errs + } + + blobs := ctx.OciBlobStore + upload, err := blobs.Resume(ctx.Context, ctx.UUID) + if err != nil { + log.Ctx(ctx).Error().Stack().Err(err).Msgf("error resolving upload: %v", err) + if errors.Is(err, storage.ErrBlobUploadUnknown) { + errs = append(errs, errcode.ErrCodeBlobUploadUnknown.WithDetail(err)) + return errs + } + + errs = append(errs, errcode.ErrCodeUnknown.WithDetail(err)) + return errs + } + ctx.Upload = upload + + if size := upload.Size(); size != ctx.State.Offset { + log.Ctx(ctx).Error().Stack().Err(err).Msgf("upload resumed at wrong offset: 
%d != %d", size, ctx.State.Offset) + errs = append(errs, errcode.ErrCodeRangeInvalid.WithDetail(err)) + return errs + } + return errs +} + +// unpackUploadState unpacks and validates the blob upload state from the +// token, using the hmacKey secret. +func (secret hmacKey) unpackUploadState(token string) (BlobUploadState, error) { + var state BlobUploadState + + tokenBytes, err := base64.URLEncoding.DecodeString(token) + if err != nil { + return state, fmt.Errorf("failed to decode token: %w", err) + } + mac := hmac.New(sha256.New, []byte(secret)) + + if len(tokenBytes) < mac.Size() { + return state, errInvalidSecret + } + + macBytes := tokenBytes[:mac.Size()] + messageBytes := tokenBytes[mac.Size():] + + mac.Write(messageBytes) + if !hmac.Equal(mac.Sum(nil), macBytes) { + return state, errInvalidSecret + } + + if err := json.Unmarshal(messageBytes, &state); err != nil { + return state, err + } + + return state, nil +} diff --git a/registry/app/pkg/docker/helpers.go b/registry/app/pkg/docker/helpers.go new file mode 100644 index 000000000..990eff536 --- /dev/null +++ b/registry/app/pkg/docker/helpers.go @@ -0,0 +1,69 @@ +// Copyright 2023 Harness, Inc. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package docker + +import ( + "context" + "fmt" + + apiauth "github.com/harness/gitness/app/api/auth" + "github.com/harness/gitness/app/api/request" + "github.com/harness/gitness/app/auth/authz" + corestore "github.com/harness/gitness/app/store" + "github.com/harness/gitness/registry/app/store" + "github.com/harness/gitness/types" + "github.com/harness/gitness/types/enum" +) + +// GetRegistryCheckAccess fetches an active registry +// and checks if the current user has permission to access it. 
+func GetRegistryCheckAccess( + ctx context.Context, + registryStore store.RegistryRepository, + authorizer authz.Authorizer, + spaceStore corestore.SpaceStore, + repoName string, + parentID int64, + reqPermissions ...enum.Permission, +) error { + registry, err := registryStore.GetByParentIDAndName(ctx, parentID, repoName) + if err != nil { + return fmt.Errorf("failed to find registry: %w", err) + } + space, err := spaceStore.Find(ctx, parentID) + if err != nil { + return fmt.Errorf("failed to find parent by ref: %w", err) + } + session, _ := request.AuthSessionFrom(ctx) + var permissionChecks []types.PermissionCheck + + for i := range reqPermissions { + permissionCheck := types.PermissionCheck{ + Permission: reqPermissions[i], + Scope: types.Scope{SpacePath: space.Identifier}, + Resource: types.Resource{ + Type: enum.ResourceTypeRegistry, + Identifier: registry.Name, + }, + } + permissionChecks = append(permissionChecks, permissionCheck) + } + + if err = apiauth.CheckRegistry(ctx, authorizer, session, permissionChecks...); err != nil { + return fmt.Errorf("access check failed: %w", err) + } + + return nil +} diff --git a/registry/app/pkg/docker/local.go b/registry/app/pkg/docker/local.go new file mode 100644 index 000000000..629276127 --- /dev/null +++ b/registry/app/pkg/docker/local.go @@ -0,0 +1,1755 @@ +// Source: https://gitlab.com/gitlab-org/container-registry + +// Copyright 2019 Gitlab Inc. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
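Before local.go begins: the controller above routes every read through `ProxyWrapper`, which walks the registry list in the order `GetOrderedRepos` returns it and hands back the first response that carries no errors. A distilled sketch of that fall-through pattern, with simplified stand-in types (the names here are illustrative, not the PR's):

```go
package main

import "fmt"

// Simplified stand-ins for the PR's Registry and Response types.
type Response struct {
	Errors []error
	From   string
}

type Registry struct {
	Name  string
	Blobs map[string]string
}

func (r Registry) Fetch(image string) Response {
	if _, ok := r.Blobs[image]; !ok {
		return Response{Errors: []error{fmt.Errorf("%s: %s not found", r.Name, image)}}
	}
	return Response{From: r.Name}
}

// proxy mirrors ProxyWrapper's control flow: try each registry in order and
// return the first error-free response, otherwise fall through to the next.
func proxy(ordered []Registry, image string) Response {
	var last Response
	for _, reg := range ordered {
		last = reg.Fetch(image)
		if len(last.Errors) == 0 {
			return last // first error-free response wins
		}
	}
	return last // every registry failed; surface the last response
}

func main() {
	local := Registry{Name: "local"}
	remote := Registry{Name: "upstream", Blobs: map[string]string{"alpine": "sha256:abc"}}
	fmt.Println(proxy([]Registry{local, remote}, "alpine").From) // upstream
}
```

The real implementation also type-asserts each artifact to `Registry` and skips entries that fail the assertion; the sketch keeps only the first-clean-response-wins control flow.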
+ +package docker + +import ( + "bytes" + "context" + "crypto/hmac" + "crypto/sha256" + "database/sql" + "encoding/base64" + "encoding/json" + "errors" + "fmt" + "io" + "mime" + "net/http" + "net/url" + "strconv" + "strings" + "time" + + "github.com/harness/gitness/registry/app/dist_temp/dcontext" + "github.com/harness/gitness/registry/app/dist_temp/errcode" + "github.com/harness/gitness/registry/app/manifest" + "github.com/harness/gitness/registry/app/manifest/manifestlist" + "github.com/harness/gitness/registry/app/manifest/ocischema" + "github.com/harness/gitness/registry/app/manifest/schema2" + "github.com/harness/gitness/registry/app/pkg" + "github.com/harness/gitness/registry/app/pkg/commons" + "github.com/harness/gitness/registry/app/storage" + "github.com/harness/gitness/registry/app/store" + "github.com/harness/gitness/registry/app/store/database/util" + "github.com/harness/gitness/registry/gc" + "github.com/harness/gitness/registry/types" + store2 "github.com/harness/gitness/store" + "github.com/harness/gitness/store/database/dbtx" + gitnesstypes "github.com/harness/gitness/types" + + "github.com/distribution/distribution/v3" + "github.com/distribution/reference" + "github.com/opencontainers/go-digest" + v1 "github.com/opencontainers/image-spec/specs-go/v1" + "github.com/rs/zerolog/log" +) + +const ( + defaultArch = "amd64" + defaultOS = "linux" + imageClass = "image" +) + +type storageType int + +const ( + manifestSchema2 storageType = iota // 0 + manifestlistSchema // 1 + ociSchema // 2 + ociImageIndexSchema // 3 + numStorageTypes // 4 +) + +const ( + manifestListCreateGCReviewWindow = 1 * time.Hour + manifestListCreateGCLockTimeout = 10 * time.Second + manifestTagGCLockTimeout = 10 * time.Second + tagDeleteGCLockTimeout = 10 * time.Second + manifestTagGCReviewWindow = 1 * time.Hour + manifestDeleteGCReviewWindow = 1 * time.Hour + manifestDeleteGCLockTimeout = 10 * time.Second + blobExistsGCLockTimeout = 10 * time.Second + blobExistsGCReviewWindow = 1 * time.Hour + DefaultMaximumReturnedEntries = 100 +) + +const ( + ReferrersSchemaVersion = 2 + ReferrersMediaType = "application/vnd.oci.image.index.v1+json" + ArtifactTypeLocalRegistry = "Local Registry" +) + +type CatalogAPIResponse struct { + Repositories []string `json:"repositories"` +} + +type S3Store interface { +} + +var errInvalidSecret = fmt.Errorf("invalid secret") + +type hmacKey string + +func NewLocalRegistry( + app *App, ms ManifestService, manifestDao store.ManifestRepository, + registryDao store.RegistryRepository, registryBlobDao store.RegistryBlobRepository, + blobRepo store.BlobRepository, mtRepository store.MediaTypesRepository, + tagDao store.TagRepository, artifactDao store.ArtifactRepository, artifactStatDao store.ArtifactStatRepository, + gcService gc.Service, tx dbtx.Transactor, +) Registry { + return &LocalRegistry{ + App: app, + ms: ms, + registryDao: registryDao, + manifestDao: manifestDao, + registryBlobDao: registryBlobDao, + blobRepo: blobRepo, + mtRepository: mtRepository, + tagDao: tagDao, + artifactDao: artifactDao, + artifactStatDao: artifactStatDao, + gcService: gcService, + tx: tx, + } +} + +type LocalRegistry struct { + App *App + ms ManifestService + registryDao store.RegistryRepository + manifestDao store.ManifestRepository + registryBlobDao store.RegistryBlobRepository + blobRepo store.BlobRepository + mtRepository store.MediaTypesRepository + tagDao store.TagRepository + artifactDao store.ArtifactRepository + artifactStatDao store.ArtifactStatRepository + gcService gc.Service + tx 
dbtx.Transactor +} + +func (r *LocalRegistry) Base() error { + return nil +} + +func (r *LocalRegistry) CanBeMount() (mount bool, repository string, err error) { + // TODO implement me + panic("implement me") +} + +func (r *LocalRegistry) GetArtifactType() string { + return ArtifactTypeLocalRegistry +} + +func (r *LocalRegistry) getManifest( + ctx context.Context, + manifestDigest digest.Digest, + repoKey string, + imageName string, + info pkg.RegistryInfo, +) (manifest.Manifest, error) { + dbRepo, err := r.registryDao.GetByParentIDAndName(ctx, info.ParentID, repoKey) + if err != nil { + return nil, err + } + + if dbRepo == nil { + return nil, manifest.RegistryUnknownError{Name: repoKey} + } + + log.Ctx(ctx).Debug().Msgf("getting manifest by digest from database") + + dig, _ := types.NewDigest(manifestDigest) + // Find manifest by its digest + dbManifest, err := r.manifestDao.FindManifestByDigest(ctx, dbRepo.ID, imageName, dig) + if err != nil { + if errors.Is(err, store2.ErrResourceNotFound) { + return nil, manifest.UnknownRevisionError{ + Name: repoKey, + Revision: manifestDigest, + } + } + return nil, err + } + + return DBManifestToManifest(dbManifest) +} + +func DBManifestToManifest(dbm *types.Manifest) (manifest.Manifest, error) { + if dbm.SchemaVersion == 1 { + return nil, manifest.ErrSchemaV1Unsupported + } + + if dbm.SchemaVersion != 2 { + return nil, fmt.Errorf("unrecognized manifest schema version %d", dbm.SchemaVersion) + } + + mediaType := dbm.MediaType + if dbm.NonConformant { + // parse payload and get real media type + var versioned manifest.Versioned + if err := json.Unmarshal(dbm.Payload, &versioned); err != nil { + return nil, fmt.Errorf("failed to unmarshal manifest payload: %w", err) + } + mediaType = versioned.MediaType + } + + // This can be an image manifest or a manifest list + switch mediaType { + case schema2.MediaTypeManifest: + m := &schema2.DeserializedManifest{} + if err := m.UnmarshalJSON(dbm.Payload); err != nil { + return nil, err + } + + return m, nil + case v1.MediaTypeImageManifest: + m := &ocischema.DeserializedManifest{} + if err := m.UnmarshalJSON(dbm.Payload); err != nil { + return nil, err + } + + return m, nil + case manifestlist.MediaTypeManifestList, v1.MediaTypeImageIndex: + m := &manifestlist.DeserializedManifestList{} + if err := m.UnmarshalJSON(dbm.Payload); err != nil { + return nil, err + } + + return m, nil + case "": + // OCI image or image index - no media type in the content + + // First see if it looks like an image index + resIndex := &manifestlist.DeserializedManifestList{} + if err := resIndex.UnmarshalJSON(dbm.Payload); err != nil { + return nil, err + } + if resIndex.Manifests != nil { + return resIndex, nil + } + + // Otherwise, assume it must be an image manifest + m := &ocischema.DeserializedManifest{} + if err := m.UnmarshalJSON(dbm.Payload); err != nil { + return nil, err + } + + return m, nil + default: + return nil, + manifest.VerificationErrors{ + fmt.Errorf("unrecognized manifest content type %s", dbm.MediaType), + } + } +} + +func (r *LocalRegistry) getTag(ctx context.Context, info pkg.RegistryInfo) (*manifest.Descriptor, error) { + dbRepo, err := r.registryDao.GetByParentIDAndName(ctx, info.ParentID, info.RegIdentifier) + + if err != nil { + return nil, err + } + + if dbRepo == nil { + return nil, manifest.RegistryUnknownError{Name: info.RegIdentifier} + } + + log.Ctx(ctx).Info().Msgf("getting manifest by tag from database") + dbManifest, err := r.manifestDao.FindManifestByTagName(ctx, dbRepo.ID, info.Image, info.Tag) + 
if err != nil { + // at the DB level a tag has a FK to manifests, so a tag cannot exist + // unless it points to an existing manifest + if errors.Is(err, store2.ErrResourceNotFound) { + return nil, manifest.TagUnknownError{Tag: info.Tag} + } + return nil, err + } + + return &manifest.Descriptor{Digest: dbManifest.Digest}, nil +} + +func etagMatch(headers []string, etag string) bool { + for _, headerVal := range headers { + if headerVal == etag || headerVal == fmt.Sprintf(`"%s"`, etag) { + // allow quoted or unquoted + return true + } + } + return false +} + +// copyFullPayload copies the payload of an HTTP request to destWriter. If it +// receives less content than expected, and the client disconnected during the +// upload, it avoids sending a 400 error to keep the logs cleaner. +// +// The copy will be limited to `limit` bytes, if limit is greater than zero. +func copyFullPayload( + ctx context.Context, length int64, body io.ReadCloser, + destWriter io.Writer, action string, +) error { + // Get a channel that tells us if the client disconnects + clientClosed := ctx.Done() + + // Read in the data, if any. + copied, err := io.Copy(destWriter, body) + if clientClosed != nil && (err != nil || (length > 0 && copied < length)) { + // Didn't receive as much content as expected. Did the client + // disconnect during the request? If so, avoid returning a 400 + // error to keep the logs cleaner. + select { + case <-clientClosed: + // Set the response Code to "499 Client Closed Request" + // Even though the connection has already been closed, + // this causes the logger to pick up a 499 error + // instead of showing 0 for the HTTP status. + // responseWriter.WriteHeader(499) + + dcontext.GetLoggerWithFields( + ctx, log.Error(), map[interface{}]interface{}{ + "error": err, + "copied": copied, + "contentLength": length, + }, "error", "copied", "contentLength", + ).Msg("client disconnected during " + action) + return errors.New("client disconnected") + default: + } + } + + if err != nil { + dcontext.GetLogger(ctx, log.Error()).Msgf("unknown error reading request payload: %v", err) + return err + } + + return nil +} + +func (r *LocalRegistry) HeadBlob( + ctx2 context.Context, + artInfo pkg.RegistryInfo, +) ( + responseHeaders *commons.ResponseHeaders, fr *storage.FileReader, size int64, readCloser io.ReadCloser, + redirectURL string, errs []error, +) { + return r.fetchBlobInternal(ctx2, http.MethodHead, artInfo) +} + +func (r *LocalRegistry) GetBlob( + ctx2 context.Context, + artInfo pkg.RegistryInfo, +) ( + responseHeaders *commons.ResponseHeaders, fr *storage.FileReader, size int64, + readCloser io.ReadCloser, redirectURL string, errs []error, +) { + return r.fetchBlobInternal(ctx2, http.MethodGet, artInfo) +} + +func (r *LocalRegistry) fetchBlobInternal( + ctx2 context.Context, method string, info pkg.RegistryInfo, +) (*commons.ResponseHeaders, *storage.FileReader, int64, io.ReadCloser, string, []error) { + ctx := r.App.GetBlobsContext(ctx2, info) + + responseHeaders := &commons.ResponseHeaders{ + Code: 0, + Headers: make(map[string]string), + } + errs := make([]error, 0) + var dgst digest.Digest + blobs := ctx.OciBlobStore + + if err := r.dbBlobLinkExists(ctx, ctx.Digest, info.RegIdentifier, info); err != nil { + errs = append(errs, errcode.FromUnknownError(err)) + return responseHeaders, nil, -1, nil, "", errs + } + dgst = ctx.Digest + headers := make(map[string]string) + fileReader, redirectURL, size, err := blobs.ServeBlobInternal( + ctx.Context, + info.RootIdentifier, + dgst, + headers, + 
method, + ) + if err != nil { + if fileReader != nil { + fileReader.Close() + } + if errors.Is(err, storage.ErrBlobUnknown) { + errs = append(errs, errcode.ErrCodeBlobUnknown.WithDetail(ctx.Digest)) + } else { + errs = append(errs, errcode.FromUnknownError(err)) + } + return responseHeaders, nil, -1, nil, "", errs + } + + if redirectURL != "" { + return responseHeaders, nil, -1, nil, redirectURL, errs + } + + for key, value := range headers { + responseHeaders.Headers[key] = value + } + + return responseHeaders, fileReader, size, nil, "", errs +} + +func (r *LocalRegistry) PullManifest( + ctx context.Context, + artInfo pkg.RegistryInfo, + acceptHeaders []string, + ifNoneMatchHeader []string, +) (responseHeaders *commons.ResponseHeaders, descriptor manifest.Descriptor, manifest manifest.Manifest, errs []error) { + return r.ManifestExist(ctx, artInfo, acceptHeaders, ifNoneMatchHeader) +} + +func (r *LocalRegistry) getDigestByTag(ctx context.Context, artInfo pkg.RegistryInfo) (digest.Digest, error) { + desc, err := r.getTag(ctx, artInfo) + if err != nil { + var tagUnknownError manifest.TagUnknownError + if errors.As(err, &tagUnknownError) { + return "", errcode.ErrCodeManifestUnknown.WithDetail(err) + } + return "", err + } + return desc.Digest, nil +} + +func getDigestFromInfo(artInfo pkg.RegistryInfo) digest.Digest { + if artInfo.Digest != "" { + return digest.Digest(artInfo.Digest) + } + return digest.Digest(artInfo.Reference) +} + +func (r *LocalRegistry) getDigest(ctx context.Context, artInfo pkg.RegistryInfo) (digest.Digest, error) { + if artInfo.Tag != "" { + return r.getDigestByTag(ctx, artInfo) + } + return getDigestFromInfo(artInfo), nil +} + +func (r *LocalRegistry) ManifestExist( + ctx context.Context, + artInfo pkg.RegistryInfo, + acceptHeaders []string, + ifNoneMatchHeader []string, +) ( + responseHeaders *commons.ResponseHeaders, descriptor manifest.Descriptor, manifestResult manifest.Manifest, + errs []error, +) { + tag := artInfo.Tag + supports := r.getSupportsList(acceptHeaders) + + d, err := r.getDigest(ctx, artInfo) + if err != nil { + return responseHeaders, descriptor, manifestResult, []error{err} + } + + if etagMatch(ifNoneMatchHeader, d.String()) { + r2 := &commons.ResponseHeaders{ + Code: http.StatusNotModified, + } + return r2, manifest.Descriptor{Digest: d}, nil, nil + } + + manifestResult, err = r.getManifest(ctx, d, artInfo.RegIdentifier, artInfo.Image, artInfo) + if err != nil { + var manifestUnknownRevisionError manifest.UnknownRevisionError + if errors.As(err, &manifestUnknownRevisionError) { + errs = append(errs, errcode.ErrCodeManifestUnknown.WithDetail(err)) + } + return responseHeaders, descriptor, manifestResult, errs + } + // determine the type of the returned manifest + manifestType := manifestSchema2 + manifestList, isManifestList := manifestResult.(*manifestlist.DeserializedManifestList) + if _, isOCImanifest := manifestResult.(*ocischema.DeserializedManifest); isOCImanifest { + manifestType = ociSchema + } else if isManifestList { + if manifestList.MediaType == manifestlist.MediaTypeManifestList { + manifestType = manifestlistSchema + } else if manifestList.MediaType == v1.MediaTypeImageIndex { + manifestType = ociImageIndexSchema + } + } + + if manifestType == ociSchema && !supports[ociSchema] { + errs = append( + errs, + errcode.ErrCodeManifestUnknown.WithMessage( + "OCI manifest found, but accept header does not support OCI manifests", + ), + ) + return responseHeaders, descriptor, manifestResult, errs + } + if manifestType == ociImageIndexSchema 
&& !supports[ociImageIndexSchema] { + errs = append( + errs, + errcode.ErrCodeManifestUnknown.WithMessage( + "OCI index found, but accept header does not support OCI indexes", + ), + ) + return responseHeaders, descriptor, manifestResult, errs + } + + if tag != "" && manifestType == manifestlistSchema && !supports[manifestlistSchema] { + d, manifestResult, err = r.rewriteManifest(ctx, artInfo, d, manifestList, supports) + if err != nil { + errs = append(errs, err) + return responseHeaders, descriptor, manifestResult, errs + } + } + + ct, p, err := manifestResult.Payload() + if err != nil { + return responseHeaders, descriptor, manifestResult, errs + } + + r2 := &commons.ResponseHeaders{ + Headers: map[string]string{ + "Content-Type": ct, + "Content-Length": fmt.Sprint(len(p)), + "Docker-Content-Digest": d.String(), + "Etag": fmt.Sprintf(`"%s"`, d), + }, + } + + return r2, manifest.Descriptor{Digest: d}, manifestResult, nil +} + +func (r *LocalRegistry) rewriteManifest( + ctx context.Context, artInfo pkg.RegistryInfo, d digest.Digest, manifestList *manifestlist.DeserializedManifestList, + supports [4]bool, +) (digest.Digest, manifest.Manifest, error) { + // Rewrite manifest in schema1 format + log.Ctx(ctx).Info().Msgf( + "rewriting manifest list %s in schema1 format to support old client", d.String(), + ) + + // Find the image manifest corresponding to the default + // platform + var manifestDigest digest.Digest + for _, manifestDescriptor := range manifestList.Manifests { + if manifestDescriptor.Platform.Architecture == defaultArch && + manifestDescriptor.Platform.OS == defaultOS { + manifestDigest = manifestDescriptor.Digest + break + } + } + + if manifestDigest == "" { + return "", nil, errcode.ErrCodeManifestUnknown + } + + manifestResult, err := r.getManifest( + ctx, manifestDigest, + artInfo.RegIdentifier, artInfo.Image, artInfo, + ) + if err != nil { + var manifestUnknownRevisionError manifest.UnknownRevisionError + if errors.As(err, &manifestUnknownRevisionError) { + return "", nil, errcode.ErrCodeManifestUnknown.WithDetail(err) + } + return "", nil, err + } + + if _, isSchema2 := manifestResult.(*schema2.DeserializedManifest); isSchema2 && !supports[manifestSchema2] { + return "", manifestResult, errcode.ErrCodeManifestInvalid.WithMessage("Schema 2 manifest not supported by client") + } + d = manifestDigest + return d, manifestResult, nil +} + +func (r *LocalRegistry) getSupportsList(acceptHeaders []string) [4]bool { + var supports [numStorageTypes]bool + // this parsing of Accept Headers is not quite as full-featured as + // godoc.org's parser, but we don't care about "q=" values + // https://github.com/golang/gddo/blob/ + // e91d4165076d7474d20abda83f92d15c7ebc3e81/httputil/header/header.go#L165-L202 + for _, acceptHeader := range acceptHeaders { + // r.Header[...] 
is a slice in case the request contains + // the same header more than once + // if the header isn't set, we'll get the zero value, + // which "range" will handle gracefully + + // we need to split each header value on "," to get the full + // list of "Accept" values (per RFC 2616) + // https://www.w3.org/Protocols/rfc2616/rfc2616-sec14.html#sec14.1 + for _, mediaType := range strings.Split(acceptHeader, ",") { + mediaType = strings.TrimSpace(mediaType) + if _, _, err := mime.ParseMediaType(mediaType); err != nil { + continue + } + + switch mediaType { + case schema2.MediaTypeManifest: + supports[manifestSchema2] = true + case manifestlist.MediaTypeManifestList: + supports[manifestlistSchema] = true + case v1.MediaTypeImageManifest: + supports[ociSchema] = true + case v1.MediaTypeImageIndex: + supports[ociImageIndexSchema] = true + } + } + } + return supports +} + +func (r *LocalRegistry) appendPutError(err error, errList []error) []error { + // TODO: Move this error list inside the context + + if errors.Is(err, manifest.ErrUnsupported) { + errList = append(errList, errcode.ErrCodeUnsupported) + return errList + } + if errors.Is(err, manifest.ErrAccessDenied) { + errList = append(errList, errcode.ErrCodeDenied) + return errList + } + if errors.Is(err, manifest.ErrSchemaV1Unsupported) { + errList = append( + errList, + errcode.ErrCodeManifestInvalid.WithDetail( + "manifest type unsupported", + ), + ) + return errList + } + if errors.Is(err, digest.ErrDigestInvalidFormat) { + errList = append(errList, errcode.ErrCodeDigestInvalid.WithDetail(err)) + return errList + } + + switch { + case errors.As(err, &manifest.VerificationErrors{}): + var verificationError manifest.VerificationErrors + errors.As(err, &verificationError) + for _, verificationError := range verificationError { + switch { + case errors.As(verificationError, &manifest.BlobUnknownError{}): + var manifestBlobUnknownError manifest.BlobUnknownError + errors.As(verificationError, &manifestBlobUnknownError) + errList = append( + errList, errcode.ErrCodeManifestBlobUnknown.WithDetail( + manifestBlobUnknownError.Digest, + ), + ) + case errors.As(verificationError, &manifest.NameInvalidError{}): + errList = append( + errList, errcode.ErrCodeNameInvalid.WithDetail(err), + ) + case errors.As(verificationError, &manifest.UnverifiedError{}): + errList = append(errList, errcode.ErrCodeManifestUnverified) + case errors.As(verificationError, &manifest.ReferencesExceedLimitError{}): + errList = append( + errList, errcode.ErrCodeManifestReferenceLimit.WithDetail(err), + ) + case errors.As(verificationError, &manifest.PayloadSizeExceedsLimitError{}): + errList = append( + errList, errcode.ErrCodeManifestPayloadSizeLimit.WithDetail(err.Error()), + ) + default: + if errors.Is(verificationError, digest.ErrDigestInvalidFormat) { + errList = append(errList, errcode.ErrCodeDigestInvalid) + } else { + errList = append(errList, errcode.FromUnknownError(verificationError)) + } + } + } + case errors.As(err, &errcode.Error{}): + errList = append(errList, err) + default: + errList = append(errList, errcode.FromUnknownError(err)) + } + return errList +} + +func (r *LocalRegistry) PutManifest( + ctx context.Context, + artInfo pkg.RegistryInfo, + mediaType string, + body io.ReadCloser, + length int64, +) (responseHeaders *commons.ResponseHeaders, errs []error) { + var jsonBuf bytes.Buffer + d, _ := digest.Parse(artInfo.Digest) + tag := artInfo.Tag + log.Ctx(ctx).Info().Msgf("Pushing manifest %s %s / %s", artInfo.RegIdentifier, d, tag) + + responseHeaders = 
&commons.ResponseHeaders{ + Headers: map[string]string{}, + Code: http.StatusCreated, + } + + if err := copyFullPayload(ctx, length, body, &jsonBuf, "image manifest PUT"); err != nil { + // copyFullPayload reports the error if necessary + errs = append(errs, errcode.ErrCodeManifestInvalid.WithDetail(err.Error())) + return responseHeaders, errs + } + + unmarshalManifest, desc, err := manifest.UnmarshalManifest(mediaType, jsonBuf.Bytes()) + if err != nil { + errs = append(errs, errcode.ErrCodeManifestInvalid.WithDetail(err)) + return responseHeaders, errs + } + + if d != "" { + if desc.Digest != d { + log.Ctx(ctx).Error().Stack().Err(err).Msgf("payload digest does not match: %q != %q", desc.Digest, d) + errs = append(errs, errcode.ErrCodeDigestInvalid) + return responseHeaders, errs + } + } else { + if tag != "" { + d = desc.Digest + } else { + errs = append(errs, errcode.ErrCodeTagInvalid.WithDetail("no tag or digest specified")) + return responseHeaders, errs + } + } + + isAnOCIManifest := mediaType == v1.MediaTypeImageManifest || + mediaType == v1.MediaTypeImageIndex + + if isAnOCIManifest { + log.Ctx(ctx).Debug().Msg("Putting an OCI Manifest!") + } else { + log.Ctx(ctx).Debug().Msg("Putting a Docker Manifest!") + } + + // We don't need to store manifest file in S3 storage + // manifestServicePut(ctx, _manifest, options...) + + if err = r.ms.DBPut( + ctx, unmarshalManifest, d, artInfo.RegIdentifier, + responseHeaders, artInfo, + ); err != nil { + errs = r.appendPutError(err, errs) + return responseHeaders, errs + } + + // Tag this manifest + if tag != "" { + if err = r.ms.DBTag( + ctx, unmarshalManifest, d, tag, artInfo.RegIdentifier, + responseHeaders, artInfo, + ); err != nil { + errs = r.appendPutError(err, errs) + return responseHeaders, errs + } + } + + if err != nil { + return r.handlePutManifestErrors(err, errs, responseHeaders) + } + + // Tag this manifest + if tag != "" { + err = tagserviceTag() + // err = tags.Tag(imh, Tag, desc) + if err != nil { + errs = append(errs, errcode.ErrCodeUnknown.WithDetail(err)) + return responseHeaders, errs + } + } + + // Construct a canonical url for the uploaded manifest. 
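+	// (Illustratively: a name like "reg1/alpine" with digest "sha256:…" yields
+	// the canonical reference "reg1/alpine@sha256:…", whose manifest URL takes
+	// the distribution-spec shape /v2/<name>/manifests/<digest>.)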
+ name, _ := reference.WithName(fmt.Sprintf("%s/%s", artInfo.RegIdentifier, artInfo.Image)) + canonicalRef, err := reference.WithDigest(name, d) + if err != nil { + errs = append(errs, errcode.ErrCodeUnknown.WithDetail(err)) + return responseHeaders, errs + } + + builder := artInfo.URLBuilder + location, err := builder.BuildManifestURL(canonicalRef) + if err != nil { + log.Ctx(ctx).Error().Stack().Err( + err, + ).Msgf("error building manifest url from digest: %v", err) + } + + responseHeaders.Headers["Location"] = location + responseHeaders.Headers["Docker-Content-Digest"] = d.String() + responseHeaders.Code = http.StatusCreated + + log.Debug().Msg("Succeeded in putting manifest!") + return responseHeaders, errs +} + +func (r *LocalRegistry) handlePutManifestErrors( + err error, errs []error, responseHeaders *commons.ResponseHeaders, +) (*commons.ResponseHeaders, []error) { + if errors.Is(err, manifest.ErrUnsupported) { + errs = append(errs, errcode.ErrCodeUnsupported) + return responseHeaders, errs + } + if errors.Is(err, manifest.ErrAccessDenied) { + errs = append(errs, errcode.ErrCodeDenied) + return responseHeaders, errs + } + switch { + case errors.As(err, &manifest.VerificationErrors{}): + var verificationError manifest.VerificationErrors + errors.As(err, &verificationError) + for _, verificationError := range verificationError { + switch { + case errors.As(verificationError, &manifest.BlobUnknownError{}): + var manifestBlobUnknownError manifest.BlobUnknownError + errors.As(verificationError, &manifestBlobUnknownError) + dgst := manifestBlobUnknownError.Digest + errs = append( + errs, + errcode.ErrCodeManifestBlobUnknown.WithDetail( + dgst, + ), + ) + case errors.As(verificationError, &manifest.NameInvalidError{}): + errs = append( + errs, + errcode.ErrCodeNameInvalid.WithDetail(err), + ) + case errors.As(verificationError, &manifest.UnverifiedError{}): + errs = append(errs, errcode.ErrCodeManifestUnverified) + default: + if errors.Is(verificationError, digest.ErrDigestInvalidFormat) { + errs = append(errs, errcode.ErrCodeDigestInvalid) + } else { + errs = append(errs, errcode.ErrCodeUnknown, verificationError) + } + } + } + case errors.As(err, &errcode.Error{}): + errs = append(errs, err) + default: + errs = append(errs, errcode.ErrCodeUnknown.WithDetail(err)) + } + return responseHeaders, errs +} + +func tagserviceTag() error { + // TODO: implement this + return nil +} + +func (r *LocalRegistry) PushBlobMonolith( + _ context.Context, + _ pkg.RegistryInfo, + _ int64, + _ io.Reader, +) error { + return nil +} + +func (r *LocalRegistry) InitBlobUpload( + ctx2 context.Context, + artInfo pkg.RegistryInfo, + fromRepo, mountDigest string, +) (*commons.ResponseHeaders, []error) { + blobCtx := r.App.GetBlobsContext(ctx2, artInfo) + var errList []error + responseHeaders := &commons.ResponseHeaders{ + Headers: make(map[string]string), + Code: 0, + } + digest := digest.Digest(mountDigest) + if mountDigest != "" && fromRepo != "" { + err := r.dbMountBlob(blobCtx, fromRepo, artInfo.RegIdentifier, digest, artInfo) + if err != nil { + e := fmt.Errorf("failed to mount blob in database: %w", err) + errList = append(errList, errcode.FromUnknownError(e)) + } + if err = writeBlobCreatedHeaders( + blobCtx, digest, + responseHeaders, artInfo, + ); err != nil { + errList = append(errList, errcode.ErrCodeUnknown.WithDetail(err)) + } + return responseHeaders, errList + } + + blobs := blobCtx.OciBlobStore + upload, err := blobs.Create(blobCtx.Context) + if err != nil { + if errors.Is(err, 
storage.ErrUnsupported) { + errList = append(errList, errcode.ErrCodeUnsupported) + } else { + errList = append(errList, errcode.ErrCodeUnknown.WithDetail(err)) + } + return responseHeaders, errList + } + + blobCtx.Upload = upload + + if err = blobUploadResponse( + blobCtx, responseHeaders, + artInfo.RegIdentifier, artInfo, + ); err != nil { + errList = append(errList, errcode.ErrCodeUnknown.WithDetail(err)) + return responseHeaders, errList + } + responseHeaders.Headers[commons.HeaderDockerUploadUUID] = blobCtx.Upload.ID() + responseHeaders.Code = http.StatusAccepted + return responseHeaders, nil +} + +func (r *LocalRegistry) PushBlobMonolithWithDigest( + _ context.Context, + _ pkg.RegistryInfo, + _ int64, + _ io.Reader, +) error { + return nil +} + +func (r *LocalRegistry) PushBlobChunk( + ctx *Context, + artInfo pkg.RegistryInfo, + contentType string, + contentRange string, + contentLength string, + body io.ReadCloser, + contentLengthFromRequest int64, +) (responseHeaders *commons.ResponseHeaders, errs []error) { + responseHeaders = &commons.ResponseHeaders{ + Code: 0, + Headers: make(map[string]string), + } + + errs = make([]error, 0) + + if ctx.Upload == nil { + e := errcode.ErrCodeBlobUploadUnknown + errs = append(errs, e) + return responseHeaders, errs + } + + if contentType != "" && contentType != "application/octet-stream" { + e := errcode.ErrCodeUnknown.WithDetail(fmt.Errorf("bad Content-Type")) + errs = append(errs, e) + return responseHeaders, errs + } + + if contentRange != "" && contentLength != "" { + start, end, err := parseContentRange(contentRange) + if err != nil { + errs = append(errs, errcode.ErrCodeUnknown.WithDetail(err.Error())) + return responseHeaders, errs + } + if start > end || start != ctx.Upload.Size() { + errs = append(errs, errcode.ErrCodeRangeInvalid) + return responseHeaders, errs + } + + clInt, err := strconv.ParseInt(contentLength, 10, 64) + if err != nil { + errs = append(errs, errcode.ErrCodeUnknown.WithDetail(err.Error())) + return responseHeaders, errs + } + if clInt != (end-start)+1 { + errs = append(errs, errcode.ErrCodeSizeInvalid) + return responseHeaders, errs + } + } + + if err := copyFullPayload( + ctx, contentLengthFromRequest, body, ctx.Upload, + "blob PATCH", + ); err != nil { + errs = append( + errs, + errcode.ErrCodeUnknown.WithDetail(err.Error()), + ) + return responseHeaders, errs + } + + if err := blobUploadResponse( + ctx, responseHeaders, artInfo.RegIdentifier, + artInfo, + ); err != nil { + errs = append(errs, errcode.ErrCodeUnknown.WithDetail(err)) + return responseHeaders, errs + } + + responseHeaders.Code = http.StatusAccepted + return responseHeaders, errs +} + +func (r *LocalRegistry) PushBlob( + ctx2 context.Context, + artInfo pkg.RegistryInfo, + body io.ReadCloser, + contentLength int64, + stateToken string, +) (responseHeaders *commons.ResponseHeaders, errs []error) { + errs = make([]error, 0) + responseHeaders = &commons.ResponseHeaders{ + Code: 0, + Headers: make(map[string]string), + } + ctx := r.App.GetBlobsContext(ctx2, artInfo) + if ctx.UUID != "" { + resumeErrs := ResumeBlobUpload(ctx, stateToken) + errs = append(errs, resumeErrs...) + } + + if ctx.Upload == nil { + err := errcode.ErrCodeBlobUploadUnknown + errs = append(errs, err) + return responseHeaders, errs + } + + defer ctx.Upload.Close() + + if artInfo.Digest == "" { + // no digest? return error, but allow retry. 
+ err := errcode.ErrCodeDigestInvalid.WithDetail("digest missing") + errs = append(errs, err) + return responseHeaders, errs + } + + dgst, err := digest.Parse(artInfo.Digest) + if err != nil { + // no digest? return error, but allow retry. + errs = append( + errs, + errcode.ErrCodeDigestInvalid.WithDetail( + "digest parsing failed", + ), + ) + return responseHeaders, errs + } + + if err := copyFullPayload( + ctx, contentLength, body, ctx.Upload, + "blob PUT", + ); err != nil { + errs = append( + errs, + errcode.ErrCodeUnknown.WithDetail(err.Error()), + ) + return responseHeaders, errs + } + + desc, err := ctx.Upload.Commit( + ctx, artInfo.RootIdentifier, manifest.Descriptor{ + Digest: dgst, + }, + ) + + if err != nil { + switch { + case errors.As(err, &storage.BlobInvalidDigestError{}): + errs = append( + errs, + errcode.ErrCodeDigestInvalid.WithDetail(err), + ) + case errors.As(err, &errcode.Error{}): + errs = append(errs, err) + default: + switch { + case errors.Is(err, storage.ErrAccessDenied): + errs = append(errs, errcode.ErrCodeDenied) + case errors.Is(err, storage.ErrUnsupported): + errs = append( + errs, + errcode.ErrCodeUnsupported, + ) + case errors.Is(err, storage.ErrBlobInvalidLength), errors.Is(err, storage.ErrBlobDigestUnsupported): + errs = append( + errs, + errcode.ErrCodeBlobUploadInvalid.WithDetail(err), + ) + default: + dcontext.GetLogger(ctx, log.Error()).Msgf("unknown error completing upload: %v", err) + errs = append(errs, errcode.ErrCodeUnknown.WithDetail(err)) + } + } + + // Clean up the backend blob data if there was an error. + if err := ctx.Upload.Cancel(ctx); err != nil { + // If the cleanup fails, all we can do is observe and report. + log.Error().Stack().Err( + err, + ).Msgf("error canceling upload after error: %v", err) + } + return responseHeaders, errs + } + + err = r.dbPutBlobUploadComplete( + ctx, + artInfo.RegIdentifier, + "application/octet-stream", + artInfo.Digest, + int(desc.Size), + artInfo, + ) + if err != nil { + errs = append(errs, err) + log.Error().Stack().Err(err).Msgf( + "ensure blob %s failed, error: %v", + artInfo.Digest, err, + ) + return responseHeaders, errs + } + + if err := writeBlobCreatedHeaders( + ctx, desc.Digest, + responseHeaders, artInfo, + ); err != nil { + errs = append(errs, errcode.ErrCodeUnknown.WithDetail(err)) + return responseHeaders, errs + } + + return responseHeaders, errs +} + +func (r *LocalRegistry) ListTags( + c context.Context, + lastEntry string, + maxEntries int, + origURL string, + artInfo pkg.RegistryInfo, +) (*commons.ResponseHeaders, []string, error) { + filters := types.FilterParams{ + LastEntry: lastEntry, + MaxEntries: maxEntries, + } + + tags, moreEntries, err := r.dbGetTags(c, filters, artInfo) + if err != nil { + return nil, nil, err + } + if len(tags) == 0 { + // If no tags are found, the current implementation (`else`) + // returns a nil slice instead of an empty one, + // so we have to enforce the same behavior here, for consistency. 
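+		// (Note: encoding/json marshals a nil slice as null and an empty slice
+		// as [], so the distinction is visible to API clients.)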
+ tags = nil + } + + responseHeaders := &commons.ResponseHeaders{ + Headers: map[string]string{"Content-Type": "application/json"}, + Code: 0, + } + + // Add a link header if there are more entries to retrieve + // (only supported by the metadata database backend) + if moreEntries { + filters.LastEntry = tags[len(tags)-1] + urlStr, err := CreateLinkEntry(origURL, filters, "", "") + if err != nil { + return responseHeaders, nil, errcode.ErrCodeUnknown.WithDetail(err) + } + if urlStr != "" { + responseHeaders.Headers["Link"] = urlStr + } + } + + return responseHeaders, tags, nil +} + +func (r *LocalRegistry) ListFilteredTags( + _ context.Context, + _ int, + _, _ string, + _ pkg.RegistryInfo, +) (tags []string, err error) { + return nil, nil +} + +func (r *LocalRegistry) DeleteManifest( + ctx context.Context, + artInfo pkg.RegistryInfo, +) (errs []error, responseHeaders *commons.ResponseHeaders) { + log.Debug().Msg("DeleteImageManifest") + var tag = artInfo.Tag + var d = artInfo.Digest + + responseHeaders = &commons.ResponseHeaders{} + + // TODO: If Tag is not empty, we just untag the tag, nothing more! + if tag != "" { + log.Debug().Msg("DeleteImageTag") + _, err := r.ms.DeleteTag(ctx, artInfo.RegIdentifier, tag, artInfo) + if err != nil { + errs = append(errs, err) + return errs, responseHeaders + } + responseHeaders.Code = http.StatusAccepted + return errs, responseHeaders + } + + err := r.ms.DeleteManifest( + ctx, artInfo.RegIdentifier, + digest.Digest(d), artInfo, + ) + if err != nil { + switch { + case errors.Is(err, digest.ErrDigestUnsupported): + case errors.Is(err, digest.ErrDigestInvalidFormat): + errs = append(errs, errcode.ErrCodeDigestInvalid) + case errors.Is(err, storage.ErrBlobUnknown): + errs = append(errs, errcode.ErrCodeManifestUnknown) + case errors.Is(err, manifest.ErrUnsupported): + errs = append(errs, errcode.ErrCodeUnsupported) + case errors.Is(err, util.ErrManifestNotFound): + errs = append(errs, errcode.ErrCodeManifestUnknown) + case errors.Is(err, util.ErrManifestReferencedInList): + errs = append(errs, errcode.ErrCodeManifestReferencedInList) + default: + errs = append(errs, errcode.ErrCodeUnknown) + } + return errs, responseHeaders + } + responseHeaders.Code = http.StatusAccepted + return errs, responseHeaders +} + +func (r *LocalRegistry) DeleteBlob( + ctx *Context, + artInfo pkg.RegistryInfo, +) (responseHeaders *commons.ResponseHeaders, errs []error) { + responseHeaders = &commons.ResponseHeaders{ + Code: 0, + Headers: make(map[string]string), + } + + errs = make([]error, 0) + + err := r.dbDeleteBlob(ctx, r.App.Config, artInfo.RegIdentifier, digest.Digest(artInfo.Digest), artInfo) + if err != nil { + switch { + case errors.Is(err, storage.ErrUnsupported): + errs = append(errs, errcode.ErrCodeUnsupported) + case errors.Is(err, storage.ErrBlobUnknown): + errs = append(errs, errcode.ErrCodeBlobUnknown) + case errors.Is(err, storage.RegistryUnknownError{Name: artInfo.RegIdentifier}): + errs = append(errs, errcode.ErrCodeNameUnknown) + default: + errs = append(errs, errcode.FromUnknownError(err)) + log.Error().Stack().Msg("failed to delete blob") + } + return + } + + responseHeaders.Headers["Content-Length"] = "0" + responseHeaders.Code = http.StatusAccepted + return +} + +func (r *LocalRegistry) MountBlob( + _ context.Context, + _ pkg.RegistryInfo, + _, _ string, +) (err error) { + return nil +} + +func (r *LocalRegistry) ListReferrers( + ctx context.Context, + artInfo pkg.RegistryInfo, + artifactType string, +) (index *v1.Index, responseHeaders 
*commons.ResponseHeaders, err error) {
+	mfs := make([]v1.Descriptor, 0)
+	rsHeaders := &commons.ResponseHeaders{
+		Headers: map[string]string{"Content-Type": ReferrersMediaType},
+		Code:    0,
+	}
+	if artifactType != "" {
+		rsHeaders.Headers["OCI-Filters-Applied"] = "artifactType"
+	}
+	registry, err := r.registryDao.GetByParentIDAndName(ctx, artInfo.ParentID, artInfo.RegIdentifier)
+	if err != nil {
+		return nil, rsHeaders, err
+	}
+	if registry == nil {
+		err := errcode.ErrCodeNameUnknown.WithDetail(artInfo.RegIdentifier)
+		return nil, rsHeaders, err
+	}
+	subjectDigest, err := types.NewDigest(digest.Digest(artInfo.Digest))
+	if err != nil {
+		return nil, rsHeaders, err
+	}
+	manifests, err := r.manifestDao.ListManifestsBySubjectDigest(
+		ctx, registry.ID, subjectDigest,
+	)
+	if err != nil && !errors.Is(err, store2.ErrResourceNotFound) {
+		return nil, rsHeaders, err
+	}
+
+	for _, m := range manifests {
+		mf := v1.Descriptor{
+			MediaType:   m.MediaType,
+			Size:        m.TotalSize,
+			Digest:      m.Digest,
+			Annotations: m.Annotations,
+		}
+
+		if m.ArtifactType.Valid {
+			mf.ArtifactType = m.ArtifactType.String
+		} else {
+			mf.ArtifactType = m.Configuration.MediaType
+		}
+
+		// filter by the artifactType since the artifactType is
+		// actually the config media type of the artifact.
+		if artifactType != "" {
+			if mf.ArtifactType == artifactType {
+				mfs = append(mfs, mf)
+			}
+		} else {
+			mfs = append(mfs, mf)
+		}
+	}
+
+	// Populate index manifest
+	result := &v1.Index{}
+	result.SchemaVersion = ReferrersSchemaVersion
+	result.MediaType = ReferrersMediaType
+	result.Manifests = mfs
+
+	return result, rsHeaders, nil
+}
+
+func (r *LocalRegistry) GetBlobUploadStatus(
+	ctx *Context,
+	artInfo pkg.RegistryInfo,
+	_ string,
+) (*commons.ResponseHeaders, []error) {
+	responseHeaders := &commons.ResponseHeaders{
+		Code:    0,
+		Headers: make(map[string]string),
+	}
+
+	errList := make([]error, 0)
+	log.Debug().Msg("GetBlobUploadStatus")
+
+	if ctx.Upload == nil {
+		blobs := ctx.OciBlobStore
+		upload, err := blobs.Resume(ctx, ctx.UUID)
+		if err != nil {
+			if errors.Is(err, distribution.ErrBlobUploadUnknown) {
+				errList = append(
+					errList,
+					errcode.ErrCodeBlobUploadUnknown.WithDetail(err),
+				)
+			} else {
+				errList = append(
+					errList,
+					errcode.ErrCodeUnknown.WithDetail(err),
+				)
+			}
+			return responseHeaders, errList
+		}
+
+		ctx.Upload = upload
+	}
+
+	if err := blobUploadResponse(
+		ctx, responseHeaders,
+		artInfo.RegIdentifier, artInfo,
+	); err != nil {
+		errList = append(
+			errList,
+			errcode.ErrCodeUnknown.WithDetail(err),
+		)
+		return responseHeaders, errList
+	}
+
+	responseHeaders.Code = http.StatusNoContent
+	return responseHeaders, nil
+}
+
+func (r *LocalRegistry) GetCatalog() (repositories []string, err error) {
+	return nil, nil
+}
+
+func (r *LocalRegistry) DeleteTag(
+	_, _ string,
+	_ pkg.RegistryInfo,
+) error {
+	return nil
+}
+
+func (r *LocalRegistry) PullBlobChunk(
+	_, _ string,
+	_, _, _ int64,
+	_ pkg.RegistryInfo,
+) (size int64, blob io.ReadCloser, err error) {
+	return 0, nil, nil
+}
+
+// writeBlobCreatedHeaders writes the standard headers describing a newly
+// created blob. A 201 Created is written as well as the canonical URL and
+// blob digest.
+func writeBlobCreatedHeaders(
+	context *Context,
+	digest digest.Digest,
+	headers *commons.ResponseHeaders,
+	info pkg.RegistryInfo,
+) error {
+	path, err := reference.WithName(fmt.Sprintf("%s/%s/%s", info.RootIdentifier, info.RegIdentifier, info.Image))
+	if err != nil {
+		return err
+	}
+	ref, err := reference.WithDigest(path, digest)
+	if err != nil {
+		return err
+	}
+	blobURL, err := context.URLBuilder.BuildBlobURL(ref)
+	if err != nil {
+		return err
+	}
+
+	headers.Headers = map[string]string{
+		"Location":              blobURL,
+		"Docker-Content-Digest": digest.String(),
+		"Content-Length":        "0",
+	}
+	headers.Code = http.StatusCreated
+
+	return nil
+}
+
+func blobUploadResponse(
+	context *Context,
+	headers *commons.ResponseHeaders,
+	repoKey string,
+	info pkg.RegistryInfo,
+) error {
+	context.State.Path = context.OciBlobStore.Path()
+	context.State.UUID = context.Upload.ID()
+	context.Upload.Close()
+	context.State.Offset = context.Upload.Size()
+
+	token, err := hmacKey(
+		context.Config.Registry.HTTP.Secret,
+	).packUploadState(
+		context.State,
+	)
+	if err != nil {
+		log.Info().Msgf("error building upload state token: %s", err)
+		return err
+	}
+	image := info.Image
+	path, err := reference.WithName(fmt.Sprintf("%s/%s/%s", info.RootIdentifier, repoKey, image))
+	if err != nil {
+		return err
+	}
+	uploadURL, err := context.URLBuilder.BuildBlobUploadChunkURL(
+		path, context.Upload.ID(),
+		url.Values{
+			"_state": []string{token},
+		},
+	)
+	if err != nil {
+		log.Info().Msgf("error building upload url: %s", err)
+		return err
+	}
+
+	endRange := context.Upload.Size()
+	if endRange > 0 {
+		endRange--
+	}
+	headers.Headers["Docker-Upload-UUID"] = context.UUID
+	headers.Headers["Location"] = uploadURL
+
+	headers.Headers["Content-Length"] = "0"
+	headers.Headers["Range"] = fmt.Sprintf("0-%d", endRange)
+
+	return nil
+}
+
+// packUploadState packs the upload state, signed with an HMAC digest using
+// the hmacKey secret and encoded as URL-safe base64. The resulting token can
+// be used to share data with minimized risk of external tampering.
+func (secret hmacKey) packUploadState(lus BlobUploadState) (string, error) {
+	mac := hmac.New(sha256.New, []byte(secret))
+	p, err := json.Marshal(lus)
+	if err != nil {
+		return "", err
+	}
+
+	mac.Write(p)
+
+	return base64.URLEncoding.EncodeToString(append(mac.Sum(nil), p...)), nil
+}
+
+func parseContentRange(cr string) (start int64, end int64, err error) {
+	rStart, rEnd, ok := strings.Cut(cr, "-")
+	if !ok {
+		return -1, -1, fmt.Errorf("invalid content range format, %s", cr)
+	}
+	start, err = strconv.ParseInt(rStart, 10, 64)
+	if err != nil {
+		return -1, -1, err
+	}
+	end, err = strconv.ParseInt(rEnd, 10, 64)
+	if err != nil {
+		return -1, -1, err
+	}
+	return start, end, nil
+}
+
+func (r *LocalRegistry) dbBlobLinkExists(
+	ctx context.Context, dgst digest.Digest, repoKey string,
+	info pkg.RegistryInfo,
+) error {
+	reg, err := r.registryDao.GetByParentIDAndName(ctx, info.ParentID, repoKey)
+	if err != nil {
+		return err
+	}
+	if reg == nil {
+		err := errcode.ErrCodeNameUnknown.WithDetail(repoKey)
+		return err
+	}
+	blob, err := r.blobRepo.FindByDigestAndRootParentID(ctx, dgst, info.RootParentID)
+	if err != nil {
+		if errors.Is(err, store2.ErrResourceNotFound) {
+			err = errcode.ErrCodeBlobUnknown.WithDetail(dgst)
+		}
+		return err
+	}
+
+	err = r.tx.WithTx(
+		ctx, func(ctx context.Context) error {
+			// Prevent long running transactions by setting an upper limit of blobExistsGCLockTimeout.
If the GC is holding + // the lock of a related review record, the processing there should be fast enough to avoid this. Regardless, we + // should not let transactions open (and clients waiting) for too long. If this sensible timeout is exceeded, abort + // the operation and let the client retry. This will bubble up and lead to a 503 Service Unavailable response. + ctx, cancel := context.WithTimeout(ctx, blobExistsGCLockTimeout) + defer cancel() + + bt, err := r.gcService.BlobFindAndLockBefore(ctx, blob.ID, time.Now().Add(blobExistsGCReviewWindow)) + if err != nil && !errors.Is(err, sql.ErrNoRows) { + return err + } + + if bt != nil { + err = r.gcService.BlobReschedule(ctx, bt, 24*time.Hour) + if err != nil { + return err + } + } + + found, err := r.blobRepo.ExistsBlob(ctx, reg.ID, dgst, info.Image) + if err != nil { + return err + } + + if !found { + err := errcode.ErrCodeBlobUnknown.WithDetail(dgst) + return err + } + return nil + }, + ) + + if err != nil { + return fmt.Errorf("committing database transaction: %w", err) + } + + return nil +} + +func (r *LocalRegistry) dbPutBlobUploadComplete( + ctx context.Context, + repoName string, + mediaType string, + digestVal string, + size int, + info pkg.RegistryInfo, +) error { + blob := &types.Blob{ + RootParentID: info.RootParentID, + Digest: digest.Digest(digestVal), + MediaType: mediaType, + Size: int64(size), + } + + err := r.tx.WithTx( + ctx, func(ctx context.Context) error { + registry, err := r.registryDao.GetByParentIDAndName(ctx, info.ParentID, repoName) + if err != nil { + return err + } + + storedBlob, err := r.blobRepo.CreateOrFind(ctx, blob) + if err != nil && !errors.Is(err, store2.ErrResourceNotFound) { + return err + } + + // link blob to repository + if err := r.registryBlobDao.LinkBlob(ctx, info.Image, registry, storedBlob.ID); err != nil { + return err + } + + artifact := &types.Artifact{ + Name: info.Image, + RegistryID: registry.ID, + Enabled: false, + } + + if err := r.artifactDao.CreateOrUpdate(ctx, artifact); err != nil { + return err + } + + now := time.Now().UTC() + + midnight := time.Date(now.Year(), now.Month(), now.Day(), 0, 0, 1, 0, time.UTC) + + artifactStat := &types.ArtifactStat{ + ArtifactID: artifact.ID, + Date: midnight.UnixMilli(), + UploadBytes: int64(size), + } + + if err := r.artifactStatDao.CreateOrUpdate(ctx, artifactStat); err != nil { + return err + } + + return nil + }, dbtx.TxDefault, + ) + if err != nil { + log.Error().Msgf("failed to put blob in database: %v", err) + return fmt.Errorf("committing database transaction: %w", err) + } + + return nil +} + +// dbDeleteBlob does not actually delete a blob from the database +// (that's GC's responsibility), it only unlinks it from +// a repository. 
+func (r *LocalRegistry) dbDeleteBlob(
+	ctx *Context, config *gitnesstypes.Config,
+	repoName string, d digest.Digest, info pkg.RegistryInfo,
+) error {
+	log.Debug().Msg("deleting blob from repository in database")
+
+	if !config.Registry.Storage.S3Storage.Delete {
+		return storage.ErrUnsupported
+	}
+
+	reg, err := r.registryDao.GetByParentIDAndName(ctx, info.ParentID, repoName)
+	if err != nil {
+		return err
+	}
+	if reg == nil {
+		return storage.RegistryUnknownError{Name: repoName}
+	}
+
+	blob, err := r.blobRepo.FindByDigestAndRepoID(ctx, d, reg.ID, info.Image)
+	if err != nil {
+		if errors.Is(err, store2.ErrResourceNotFound) {
+			return storage.ErrBlobUnknown
+		}
+		return err
+	}
+	found, err := r.registryBlobDao.UnlinkBlob(ctx, info.Image, reg, blob.ID)
+	if err != nil {
+		return err
+	}
+	if !found {
+		return storage.ErrBlobUnknown
+	}
+
+	return nil
+}
+
+func (r *LocalRegistry) dbGetTags(
+	ctx context.Context, filters types.FilterParams,
+	info pkg.RegistryInfo,
+) ([]string, bool, error) {
+	log.Debug().Msg("finding tags in database")
+
+	reg, err := r.registryDao.GetByParentIDAndName(ctx, info.ParentID, info.RegIdentifier)
+	if err != nil {
+		return nil, false, err
+	}
+	if reg == nil {
+		return nil, false,
+			errcode.ErrCodeNameUnknown.WithDetail(map[string]string{"name": info.RegIdentifier})
+	}
+
+	tt, err := r.tagDao.TagsPaginated(ctx, reg.ID, info.Image, filters)
+	if err != nil {
+		return nil, false, err
+	}
+
+	tags := make([]string, 0, len(tt))
+	for _, t := range tt {
+		tags = append(tags, t.Name)
+	}
+
+	var moreEntries bool
+	if len(tt) > 0 {
+		filters.LastEntry = tt[len(tt)-1].Name
+		moreEntries, err = r.tagDao.HasTagsAfterName(ctx, reg.ID, filters)
+		if err != nil {
+			return nil, false, err
+		}
+	}
+
+	return tags, moreEntries, nil
+}
+
+func (r *LocalRegistry) dbMountBlob(
+	ctx context.Context, fromRepo, toRepo string,
+	d digest.Digest, info pkg.RegistryInfo,
+) error {
+	log.Debug().Msg("cross repository blob mounting")
+
+	destRepo, err := r.registryDao.GetByParentIDAndName(ctx, info.ParentID, toRepo)
+	if err != nil {
+		return err
+	}
+	if destRepo == nil {
+		return fmt.Errorf(
+			"destination repository: [%s] not found in database",
+			toRepo,
+		)
+	}
+	sourceRepo, err := r.registryDao.GetByParentIDAndName(ctx, info.ParentID, fromRepo)
+	if err != nil {
+		return err
+	}
+	if sourceRepo == nil {
+		return fmt.Errorf(
+			"source repository: [%s] not found in database",
+			fromRepo,
+		)
+	}
+
+	b, err := r.ms.DBFindRepositoryBlob(
+		ctx, manifest.Descriptor{Digest: d},
+		sourceRepo.ID, info,
+	)
+	if err != nil {
+		return err
+	}
+
+	return r.registryBlobDao.LinkBlob(ctx, info.Image, destRepo, b.ID)
+}
diff --git a/registry/app/pkg/docker/manifest_service.go b/registry/app/pkg/docker/manifest_service.go
new file mode 100644
index 000000000..8684c40ae
--- /dev/null
+++ b/registry/app/pkg/docker/manifest_service.go
@@ -0,0 +1,916 @@
+// Source: https://gitlab.com/gitlab-org/container-registry
+
+// Copyright 2019 Gitlab Inc.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//	http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package docker
+
+import (
+	"context"
+	"database/sql"
+	"errors"
+	"fmt"
+	"time"
+
+	"github.com/harness/gitness/registry/app/manifest"
+	"github.com/harness/gitness/registry/app/manifest/manifestlist"
+	"github.com/harness/gitness/registry/app/manifest/ocischema"
+	"github.com/harness/gitness/registry/app/manifest/schema2"
+	"github.com/harness/gitness/registry/app/pkg"
+	"github.com/harness/gitness/registry/app/pkg/commons"
+	"github.com/harness/gitness/registry/app/store"
+	"github.com/harness/gitness/registry/app/store/database/util"
+	"github.com/harness/gitness/registry/gc"
+	"github.com/harness/gitness/registry/types"
+	store2 "github.com/harness/gitness/store"
+	"github.com/harness/gitness/store/database/dbtx"
+
+	"github.com/distribution/distribution/v3"
+	"github.com/distribution/distribution/v3/registry/api/errcode"
+	"github.com/opencontainers/go-digest"
+	v1 "github.com/opencontainers/image-spec/specs-go/v1"
+	"github.com/rs/zerolog/log"
+)
+
+type manifestService struct {
+	registryDao     store.RegistryRepository
+	manifestDao     store.ManifestRepository
+	layerDao        store.LayerRepository
+	blobRepo        store.BlobRepository
+	mtRepository    store.MediaTypesRepository
+	tagDao          store.TagRepository
+	artifactDao     store.ArtifactRepository
+	artifactStatDao store.ArtifactStatRepository
+	manifestRefDao  store.ManifestReferenceRepository
+	gcService       gc.Service
+	tx              dbtx.Transactor
+}
+
+func NewManifestService(
+	registryDao store.RegistryRepository, manifestDao store.ManifestRepository,
+	blobRepo store.BlobRepository, mtRepository store.MediaTypesRepository, tagDao store.TagRepository,
+	artifactDao store.ArtifactRepository, artifactStatDao store.ArtifactStatRepository,
+	layerDao store.LayerRepository, manifestRefDao store.ManifestReferenceRepository,
+	tx dbtx.Transactor, gcService gc.Service,
+) ManifestService {
+	return &manifestService{
+		registryDao:     registryDao,
+		manifestDao:     manifestDao,
+		layerDao:        layerDao,
+		blobRepo:        blobRepo,
+		mtRepository:    mtRepository,
+		tagDao:          tagDao,
+		artifactDao:     artifactDao,
+		artifactStatDao: artifactStatDao,
+		manifestRefDao:  manifestRefDao,
+		gcService:       gcService,
+		tx:              tx,
+	}
+}
+
+type ManifestService interface {
+	// DBTag creates a tag for a manifest in the database.
+	DBTag(
+		ctx context.Context,
+		mfst manifest.Manifest,
+		d digest.Digest,
+		tag string,
+		repoKey string,
+		headers *commons.ResponseHeaders,
+		info pkg.RegistryInfo,
+	) error
+	DBPut(
+		ctx context.Context,
+		mfst manifest.Manifest,
+		d digest.Digest,
+		repoKey string,
+		headers *commons.ResponseHeaders,
+		info pkg.RegistryInfo,
+	) error
+	DeleteTag(ctx context.Context, repoKey string, tag string, info pkg.RegistryInfo) (bool, error)
+	DeleteManifest(ctx context.Context, repoKey string, d digest.Digest, info pkg.RegistryInfo) error
+	DBFindRepositoryBlob(
+		ctx context.Context, desc manifest.Descriptor, repoID int64,
+		info pkg.RegistryInfo,
+	) (*types.Blob, error)
+}
+
+func (l *manifestService) DBTag(
+	ctx context.Context,
+	mfst manifest.Manifest,
+	d digest.Digest,
+	tag string,
+	repoKey string,
+	headers *commons.ResponseHeaders,
+	info pkg.RegistryInfo,
+) error {
+	imageName := info.Image
+
+	if err := l.dbTagManifest(ctx, d, tag, imageName, info); err != nil {
+		log.Ctx(ctx).Error().Err(err).Msg("failed to create tag in database")
+		err2 := l.handleTagError(ctx, mfst, d, tag, repoKey, headers, info, err, imageName)
+		if err2 != nil {
+			return err2
+		}
+	}
+
+	return nil
+}
+
+func (l *manifestService) handleTagError(
+	ctx context.Context,
+	mfst manifest.Manifest,
+	d digest.Digest,
+	tag string,
+	repoKey string,
+	headers *commons.ResponseHeaders,
+	info pkg.RegistryInfo,
+	err error,
+	imageName string,
+) error {
+	if errors.Is(err, util.ErrManifestNotFound) {
+		// If online GC was already reviewing the manifest that we want to tag, and that manifest had no
+		// tags before the review start, the API is unable to stop the GC from deleting the manifest (as
+		// the GC already acquired the lock on the corresponding queue row). This means that once the API
+		// is unblocked and tries to create the tag, a foreign key violation error will occur (because we're
+		// trying to create a tag for a manifest that no longer exists) and lead to this specific error.
+		// This should be extremely rare, if it ever occurs, but if it does, we should recreate the manifest
+		// and tag it, instead of returning a "manifest not found" response to clients. It's expected that
+		// this route handles the creation of a manifest if it doesn't exist already.
+		if err = l.DBPut(ctx, mfst, "", repoKey, headers, info); err != nil {
+			return fmt.Errorf("failed to recreate manifest in database: %w", err)
+		}
+		if err = l.dbTagManifest(ctx, d, tag, imageName, info); err != nil {
+			return fmt.Errorf("failed to create tag in database after manifest recreate: %w", err)
+		}
+	} else {
+		return fmt.Errorf("failed to create tag in database: %w", err)
+	}
+	return nil
+}
+
+func (l *manifestService) dbTagManifest(
+	ctx context.Context,
+	dgst digest.Digest,
+	tagName, imageName string,
+	info pkg.RegistryInfo,
+) error {
+	dbRepo, err := l.registryDao.GetByParentIDAndName(ctx, info.ParentID, info.RegIdentifier)
+	if err != nil {
+		return err
+	}
+	newDigest, err := types.NewDigest(dgst)
+	if err != nil {
+		return err
+	}
+	dbManifest, err := l.manifestDao.FindManifestByDigest(ctx, dbRepo.ID, info.Image, newDigest)
+	if err != nil {
+		if errors.Is(err, store2.ErrResourceNotFound) {
+			return fmt.Errorf("manifest %s not found in database", dgst)
+		}
+		return err
+	}
+
+	// We need to find and lock a GC manifest task that is related to the manifest that we're about to tag. This
+	// is needed to ensure we lock any related online GC tasks to prevent race conditions around the tag creation.
+
+	return l.tx.WithTx(
+		ctx, func(ctx context.Context) error {
+			// Prevent long running transactions by setting an upper limit of manifestTagGCLockTimeout. If the GC is
+			// holding the lock of a related review record, the processing there should be fast enough to avoid this.
+			// Regardless, we should not leave transactions open (and clients waiting) for too long. If this sensible
+			// timeout is exceeded, abort the tag creation and let the client retry. This will bubble up and lead to
+			// a 503 Service Unavailable response.
+ ctx, cancel := context.WithTimeout(ctx, manifestTagGCLockTimeout) + defer cancel() + + if _, err := l.gcService.ManifestFindAndLockBefore( + ctx, dbRepo.ID, dbManifest.ID, + time.Now().Add(manifestTagGCReviewWindow), + ); err != nil && !errors.Is(err, sql.ErrNoRows) { + return err + } + + artifact := &types.Artifact{ + Name: imageName, + RegistryID: dbRepo.ID, + Enabled: true, + } + + if err := l.artifactDao.CreateOrUpdate(ctx, artifact); err != nil { + return err + } + + tag := &types.Tag{ + Name: tagName, + ImageName: imageName, + RegistryID: dbRepo.ID, + ManifestID: dbManifest.ID, + } + + if err := l.tagDao.CreateOrUpdate(ctx, tag); err != nil { + return err + } + + a, err := l.artifactDao.GetByName(ctx, dbRepo.ID, imageName) + if err != nil { + return err + } + now := time.Now().UTC() + + midnight := time.Date(now.Year(), now.Month(), now.Day(), 0, 0, 1, 0, time.UTC) + + artifactStat := &types.ArtifactStat{ + ArtifactID: a.ID, + Date: midnight.UnixMilli(), + DownloadCount: 1, + } + + if err := l.artifactStatDao.CreateOrUpdate(ctx, artifactStat); err != nil { + return err + } + + return nil + }, + ) +} + +func (l *manifestService) DBPut( + ctx context.Context, + mfst manifest.Manifest, + d digest.Digest, + repoKey string, + headers *commons.ResponseHeaders, + info pkg.RegistryInfo, +) error { + _, payload, err := mfst.Payload() + if err != nil { + return err + } + + err = l.dbPutManifest(ctx, mfst, payload, d, repoKey, headers, info) + var mtErr util.UnknownMediaTypeError + if errors.As(err, &mtErr) { + return errcode.ErrorCodeManifestInvalid.WithDetail(mtErr.Error()) + } + return err +} + +func (l *manifestService) dbPutManifest( + ctx context.Context, + manifest manifest.Manifest, + payload []byte, + d digest.Digest, + repoKey string, + headers *commons.ResponseHeaders, + info pkg.RegistryInfo, +) error { + switch reqManifest := manifest.(type) { + case *schema2.DeserializedManifest: + return l.dbPutManifestSchema2(ctx, reqManifest, payload, d, repoKey, headers, info) + case *ocischema.DeserializedManifest: + return l.dbPutManifestOCI(ctx, reqManifest, payload, d, repoKey, headers, info) + case *manifestlist.DeserializedManifestList: + return l.dbPutManifestList(ctx, reqManifest, payload, d, repoKey, headers, info) + case *ocischema.DeserializedImageIndex: + return l.dbPutImageIndex(ctx, reqManifest, payload, d, repoKey, headers, info) + default: + return errcode.ErrorCodeManifestInvalid.WithDetail("manifest type unsupported") + } +} + +func (l *manifestService) dbPutManifestSchema2( + ctx context.Context, + manifest *schema2.DeserializedManifest, + payload []byte, + d digest.Digest, + repoKey string, + headers *commons.ResponseHeaders, + info pkg.RegistryInfo, +) error { + return l.dbPutManifestV2(ctx, manifest, payload, false, d, repoKey, headers, info) +} + +func (l *manifestService) dbPutManifestV2( + ctx context.Context, + mfst manifest.ManifestV2, + payload []byte, + nonConformant bool, + digest digest.Digest, + repoKey string, + headers *commons.ResponseHeaders, + info pkg.RegistryInfo, +) error { + // find target repository + dbRepo, err := l.registryDao.GetByParentIDAndName(ctx, info.ParentID, repoKey) + if err != nil { + return err + } + if dbRepo == nil { + return errors.New("repository not found in database") + } + + // Find the config now to ensure that the config's blob is associated with the repository. 
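+	// If the config blob was never uploaded to (or linked with) this repository,
+	// the lookup below fails and the push is rejected before any manifest rows
+	// are written.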
+	dbCfgBlob, err := l.DBFindRepositoryBlob(ctx, mfst.Config(), dbRepo.ID, info)
+	if err != nil {
+		return err
+	}
+
+	dgst, err := types.NewDigest(digest)
+	if err != nil {
+		return err
+	}
+
+	dbManifest, err := l.manifestDao.FindManifestByDigest(ctx, dbRepo.ID, info.Image, dgst)
+	if err != nil && !errors.Is(err, store2.ErrResourceNotFound) {
+		return err
+	}
+
+	if dbManifest != nil {
+		return nil
+	}
+
+	log.Debug().Msg("manifest not found in database")
+
+	cfg := &types.Configuration{
+		MediaType: mfst.Config().MediaType,
+		Digest:    dbCfgBlob.Digest,
+		BlobID:    dbCfgBlob.ID,
+	}
+
+	// TODO: check if we need to store the config payload in the database.
+
+	// Skip retrieval and caching of the config payload if its size is over the limit:
+	/*if dbCfgBlob.Size <= datastore.ConfigSizeLimit {
+		// Since filesystem writes may be optional, we cannot be sure that the
+		// repository scoped filesystem blob service will have a link to the
+		// configuration blob; however, since we check for repository scoped access
+		// via the database above, we may retrieve the blob directly from common storage.
+		cfgPayload, err := imh.blobProvider.Get(imh, dbCfgBlob.Digest)
+		if err != nil {
+			return err
+		}
+		cfg.Payload = cfgPayload
+	}*/
+
+	m := &types.Manifest{
+		RegistryID:    dbRepo.ID,
+		TotalSize:     mfst.TotalSize(),
+		SchemaVersion: mfst.Version().SchemaVersion,
+		MediaType:     mfst.Version().MediaType,
+		Digest:        digest,
+		Payload:       payload,
+		Configuration: cfg,
+		NonConformant: nonConformant,
+		ImageName:     info.Image,
+	}
+
+	var artifactMediaType sql.NullString
+	ocim, ok := mfst.(manifest.ManifestOCI)
+	if ok {
+		subjectHandlingError := l.handleSubject(
+			ctx, ocim.Subject(), ocim.ArtifactType(),
+			ocim.Annotations(), dbRepo, m, headers, info,
+		)
+		if subjectHandlingError != nil {
+			return subjectHandlingError
+		}
+		if ocim.ArtifactType() != "" {
+			artifactMediaType.Valid = true
+			artifactMediaType.String = ocim.ArtifactType()
+			m.ArtifactType = artifactMediaType
+		}
+	} else if mfst.Config().MediaType != "" {
+		artifactMediaType.Valid = true
+		artifactMediaType.String = mfst.Config().MediaType
+		m.ArtifactType = artifactMediaType
+	}
+
+	// Check if the manifest references non-distributable layers and mark it as such in the DB.
+	ll := mfst.DistributableLayers()
+	m.NonDistributableLayers = len(ll) < len(mfst.Layers())
+
+	// Use CreateOrFind to prevent race conditions while pushing the same manifest with digest for different tags.
+	if err := l.manifestDao.CreateOrFind(ctx, m); err != nil {
+		return err
+	}
+
+	dbManifest = m
+
+	// Find and associate distributable manifest layer blobs.
+	for _, reqLayer := range mfst.DistributableLayers() {
+		dbBlob, err := l.DBFindRepositoryBlob(ctx, reqLayer, dbRepo.ID, info)
+		if err != nil {
+			return err
+		}
+
+		// Overwrite the media type from common blob storage with the one
+		// specified in the manifest json for the layer entity. The layer entity
+		// has a 1-1 relationship with the manifest, so we want to reflect
+		// the manifest's description of the layer. Multiple manifests can reference
+		// the same blob, so the common blob storage should remain generic.
+ if ok2 := l.layerMediaTypeExists(ctx, reqLayer.MediaType); ok2 { + dbBlob.MediaType = reqLayer.MediaType + } + + if err2 := l.layerDao.AssociateLayerBlob(ctx, dbManifest, dbBlob); err2 != nil { + return err2 + } + } + + return nil +} + +func (l *manifestService) DBFindRepositoryBlob( + ctx context.Context, desc manifest.Descriptor, + repoID int64, info pkg.RegistryInfo, +) (*types.Blob, error) { + image := info.Image + b, err := l.blobRepo.FindByDigestAndRepoID(ctx, desc.Digest, repoID, image) + if err != nil { + if errors.Is(err, store2.ErrResourceNotFound) { + return nil, fmt.Errorf("blob not found in database") + } + return nil, err + } + return b, nil +} + +func (l *manifestService) handleSubject( + ctx context.Context, subject manifest.Descriptor, + artifactType string, annotations map[string]string, dbRepo *types.Registry, + m *types.Manifest, headers *commons.ResponseHeaders, info pkg.RegistryInfo, +) error { + if subject.Digest.String() != "" { + // Fetch subject_id from digest + subjectDigest, err := types.NewDigest(subject.Digest) + if err != nil { + return err + } + dbSubject, err := l.manifestDao.FindManifestByDigest(ctx, dbRepo.ID, info.Image, subjectDigest) + if err != nil && !errors.Is(err, store2.ErrResourceNotFound) { + return err + } + + if errors.Is(err, store2.ErrResourceNotFound) { + // in case something happened to the referenced manifest after validation + // return distribution.ManifestBlobUnknownError{Digest: subject.Digest} + log.Ctx(ctx).Warn().Msgf("subject manifest not found in database") + } else { + m.SubjectID.Int64 = dbSubject.ID + m.SubjectID.Valid = true + } + m.SubjectDigest = subject.Digest + headers.Headers["OCI-Subject"] = subject.Digest.String() + } + + if artifactType != "" { + m.ArtifactType.String = artifactType + m.ArtifactType.Valid = true + } + m.Annotations = annotations + return nil +} + +func (l *manifestService) dbPutManifestOCI( + ctx context.Context, + manifest *ocischema.DeserializedManifest, + payload []byte, + d digest.Digest, + repoKey string, + headers *commons.ResponseHeaders, + info pkg.RegistryInfo, +) error { + return l.dbPutManifestV2(ctx, manifest, payload, false, d, repoKey, headers, info) +} + +func (l *manifestService) dbPutManifestList( + ctx context.Context, + manifestList *manifestlist.DeserializedManifestList, + payload []byte, + digest digest.Digest, + repoKey string, + headers *commons.ResponseHeaders, + info pkg.RegistryInfo, +) error { + if LikelyBuildxCache(manifestList) { + return l.dbPutBuildkitIndex(ctx, manifestList, payload, digest, repoKey, headers, info) + } + + r, err := l.registryDao.GetByParentIDAndName(ctx, info.ParentID, repoKey) + if err != nil { + return err + } + if r == nil { + return errors.New("repository not found in database") + } + + dgst, err := types.NewDigest(digest) + if err != nil { + return err + } + + ml, err := l.manifestDao.FindManifestByDigest(ctx, r.ID, info.Image, dgst) + if err != nil && !errors.Is(err, store2.ErrResourceNotFound) { + return err + } + + // Media type can be either Docker (`application/vnd.docker.distribution.manifest.list.v2+json`) + // or OCI (empty). + // We need to make it explicit if empty, otherwise we're not able to distinguish between media types. 
+ mediaType := manifestList.MediaType + if mediaType == "" { + mediaType = v1.MediaTypeImageIndex + } + + ml = &types.Manifest{ + RegistryID: r.ID, + SchemaVersion: manifestList.SchemaVersion, + MediaType: mediaType, + Digest: digest, + Payload: payload, + ImageName: info.Image, + } + + mm := make([]*types.Manifest, 0, len(manifestList.Manifests)) + ids := make([]int64, 0, len(mm)) + for _, desc := range manifestList.Manifests { + m, err := l.dbFindManifestListManifest(ctx, r, info.Image, desc.Digest) + if err != nil { + return err + } + mm = append(mm, m) + ids = append(ids, m.ID) + } + + err = l.tx.WithTx( + ctx, func(ctx context.Context) error { + // Prevent long running transactions by setting an upper limit of + // manifestListCreateGCLockTimeout. If the GC is + // holding the lock of a related review record, the processing + // there should be fast enough to avoid this. + // Regardless, we should not let transactions open (and clients waiting) + // for too long. If this sensible timeout + // is exceeded, abort the request and let the client retry. + // This will bubble up and lead to a 503 Service + // Unavailable response. + ctx, cancel := context.WithTimeout(ctx, manifestListCreateGCLockTimeout) + defer cancel() + + if _, err := l.gcService.ManifestFindAndLockNBefore( + ctx, r.ID, ids, + time.Now().Add(manifestListCreateGCReviewWindow), + ); err != nil { + return err + } + + // use CreateOrFind to prevent race conditions when the same digest is used by different tags + // and pushed at the same time + if err := l.manifestDao.CreateOrFind(ctx, ml); err != nil { + return err + } + + // Associate manifests to the manifest list. + for _, m := range mm { + if err := l.manifestRefDao.AssociateManifest(ctx, ml, m); err != nil { + if errors.Is(err, util.ErrRefManifestNotFound) { + // This can only happen if the online GC deleted one + // of the referenced manifests (because they were + // untagged/unreferenced) between the call to + // `FindAndLockNBefore` and `AssociateManifest`. For now + // we need to return this error to mimic the behaviour + // of the corresponding filesystem validation. + return distribution.ErrManifestVerification{ + distribution.ErrManifestBlobUnknown{Digest: m.Digest}, + } + } + return err + } + } + return nil + }, + ) + + if err != nil { + log.Ctx(ctx).Error().Err(err).Msgf("failed to create manifest list in database") + } + return err +} + +func (l *manifestService) dbPutImageIndex( + ctx context.Context, + imageIndex *ocischema.DeserializedImageIndex, + payload []byte, + digest digest.Digest, + repoKey string, + headers *commons.ResponseHeaders, + info pkg.RegistryInfo, +) error { + r, err := l.registryDao.GetByParentIDAndName(ctx, info.ParentID, repoKey) + if err != nil { + return err + } + if r == nil { + return errors.New("repository not found in database") + } + + dgst, err := types.NewDigest(digest) + if err != nil { + return err + } + + mi, err := l.manifestDao.FindManifestByDigest(ctx, r.ID, info.Image, dgst) + if err != nil && !errors.Is(err, store2.ErrResourceNotFound) { + return err + } + + // Media type can be either Docker (`application/vnd.docker.distribution.manifest.list.v2+json`) + // or OCI (empty). + // We need to make it explicit if empty, otherwise we're not able to distinguish + // between media types. 
+	mediaType := imageIndex.MediaType
+	if mediaType == "" {
+		mediaType = v1.MediaTypeImageIndex
+	}
+
+	mi = &types.Manifest{
+		RegistryID:    r.ID,
+		SchemaVersion: imageIndex.SchemaVersion,
+		MediaType:     mediaType,
+		Digest:        digest,
+		Payload:       payload,
+		ImageName:     info.Image,
+	}
+
+	subjectHandlingError := l.handleSubject(
+		ctx, imageIndex.Subject(), imageIndex.ArtifactType(),
+		imageIndex.Annotations(), r, mi, headers, info,
+	)
+	if subjectHandlingError != nil {
+		return subjectHandlingError
+	}
+
+	mm := make([]*types.Manifest, 0, len(imageIndex.Manifests))
+	ids := make([]int64, 0, len(mm))
+	for _, desc := range imageIndex.Manifests {
+		m, err := l.dbFindManifestListManifest(ctx, r, info.Image, desc.Digest)
+		if err != nil {
+			return err
+		}
+		mm = append(mm, m)
+		ids = append(ids, m.ID)
+	}
+
+	err = l.tx.WithTx(
+		ctx, func(ctx context.Context) error {
+			// Prevent long running transactions by setting an upper limit of
+			// manifestListCreateGCLockTimeout. If the GC is
+			// holding the lock of a related review record, the processing
+			// there should be fast enough to avoid this.
+			// Regardless, we should not leave transactions open (and clients waiting)
+			// for too long. If this sensible timeout
+			// is exceeded, abort the request and let the client retry.
+			// This will bubble up and lead to a 503 Service
+			// Unavailable response.
+			ctx, cancel := context.WithTimeout(ctx, manifestListCreateGCLockTimeout)
+			defer cancel()
+
+			if _, err := l.gcService.ManifestFindAndLockNBefore(
+				ctx, r.ID, ids,
+				time.Now().Add(manifestListCreateGCReviewWindow),
+			); err != nil {
+				return err
+			}
+			// Use CreateOrFind to prevent race conditions when the same digest is used by different tags
+			// and pushed at the same time.
+			if err := l.manifestDao.CreateOrFind(ctx, mi); err != nil {
+				return err
+			}
+
+			// Associate manifests to the manifest list.
+			for _, m := range mm {
+				if err := l.manifestRefDao.AssociateManifest(ctx, mi, m); err != nil {
+					if errors.Is(err, util.ErrRefManifestNotFound) {
+						// This can only happen if the online GC deleted one of the
+						// referenced manifests (because they were
+						// untagged/unreferenced) between the call to
+						// `FindAndLockNBefore` and `AssociateManifest`. For now
+						// we need to return this error to mimic the behaviour
+						// of the corresponding filesystem validation.
+						return distribution.ErrManifestVerification{
+							distribution.ErrManifestBlobUnknown{Digest: m.Digest},
+						}
+					}
+					return err
+				}
+			}
+			return nil
+		},
+	)
+
+	if err != nil {
+		log.Ctx(ctx).Error().Err(err).Msg("failed to create image index in database")
+	}
+	return err
+}
+
+func (l *manifestService) dbPutBuildkitIndex(
+	ctx context.Context,
+	ml *manifestlist.DeserializedManifestList,
+	payload []byte,
+	digest digest.Digest,
+	repoKey string,
+	headers *commons.ResponseHeaders,
+	info pkg.RegistryInfo,
+) error {
+	// Convert to an OCI manifest and process it as if it were one.
+	m, err := OCIManifestFromBuildkitIndex(ml)
+	if err != nil {
+		return fmt.Errorf("converting buildkit index to manifest: %w", err)
+	}
+
+	// Note that `payload` is not the deserialized manifest (`m`) payload but
+	// rather the index payload, untouched.
+	// Within dbPutManifestV2 we use this value for the
+	// `manifests.payload` column and source the value for
+	// the `manifests.digest` column from the digest argument, and not from `m`.
+	// Therefore, we keep behavioral consistency for
+	// the outside world by preserving the index payload and digest while
+	// storing things internally as an OCI manifest.
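+	// The literal `true` below is the nonConformant flag: it records in the
+	// manifests table that the stored row was derived from a buildkit index
+	// rather than a payload of the declared manifest media type.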
+ return l.dbPutManifestV2(ctx, m, payload, true, digest, repoKey, headers, info) +} + +func (l *manifestService) dbFindManifestListManifest( + ctx context.Context, + repository *types.Registry, imageName string, digest digest.Digest, +) (*types.Manifest, error) { + dgst, err := types.NewDigest(digest) + if err != nil { + return nil, err + } + dbManifest, err := l.manifestDao.FindManifestByDigest( + ctx, repository.ID, + imageName, dgst, + ) + if err != nil { + if errors.Is(err, store2.ErrResourceNotFound) { + return nil, fmt.Errorf( + "manifest %s not found for %s/%s", digest.String(), + repository.Name, imageName, + ) + } + return nil, err + } + + return dbManifest, nil +} + +func (l *manifestService) layerMediaTypeExists(ctx context.Context, mt string) bool { + exists, err := l.mtRepository.MediaTypeExists(ctx, mt) + if err != nil { + log.Ctx(ctx).Error().Stack().Err(err).Msgf("error checking for existence of media type: %v", err) + return false + } + + if exists { + return true + } + + log.Ctx(ctx).Warn().Msgf("unknown layer media type") + + return false +} + +func (l *manifestService) DeleteTag( + ctx context.Context, + repoKey string, + tag string, + info pkg.RegistryInfo, +) (bool, error) { + // Fetch the registry by parent ID and name + registry, err := l.registryDao.GetByParentIDAndName(ctx, info.ParentID, repoKey) + if err != nil { + return false, err + } + + found, err := l.tagDao.DeleteTagByName(ctx, registry.ID, tag) + if err != nil { + return false, fmt.Errorf("failed to delete tag in database: %w", err) + } + if !found { + return false, distribution.ErrTagUnknown{Tag: tag} + } + + return true, nil +} + +func (l *manifestService) DeleteTagsByManifestID( + ctx context.Context, + repoKey string, + manifestID int64, + info pkg.RegistryInfo, +) (bool, error) { + registry, err := l.registryDao.GetByParentIDAndName(ctx, info.ParentID, repoKey) + + if err != nil { + return false, err + } + + return l.tagDao.DeleteTagByManifestID(ctx, registry.ID, manifestID) +} + +func (l *manifestService) DeleteManifest( + ctx context.Context, + repoKey string, + d digest.Digest, + info pkg.RegistryInfo, +) error { + log.Ctx(ctx).Debug().Msg("deleting manifest from repository in database") + + registry, err := l.registryDao.GetByParentIDAndName(ctx, info.ParentID, repoKey) + imageName := info.Image + + if registry == nil || err != nil { + return fmt.Errorf("repository not found in database: %w", err) + } + + // We need to find the manifest first and then lookup for any manifest + // it references (if it's a manifest list). This + // is needed to ensure we lock any related online GC tasks to prevent + // race conditions around the delete. + newDigest, err := types.NewDigest(d) + if err != nil { + return err + } + m, err := l.manifestDao.FindManifestByDigest(ctx, registry.ID, imageName, newDigest) + if err != nil { + if errors.Is(err, store2.ErrResourceNotFound) { + return util.ErrManifestNotFound + } + return err + } + + return l.tx.WithTx( + ctx, func(ctx context.Context) error { + switch m.MediaType { + case manifestlist.MediaTypeManifestList, v1.MediaTypeImageIndex: + mm, err := l.manifestDao.References(ctx, m) + if err != nil { + return err + } + + // This should never happen, as it's not possible to delete a + // child manifest if it's referenced by a list, which + // means that we'll always have at least one child manifest here. + // Nevertheless, log error if this ever happens. 
+ if len(mm) == 0 { + log.Ctx(ctx).Error().Stack().Err(err).Msgf("stored manifest list has no references") + break + } + ids := make([]int64, 0, len(mm)) + for _, m := range mm { + ids = append(ids, m.ID) + } + + // Prevent long running transactions by setting an upper limit of + // manifestDeleteGCLockTimeout. If the GC is + // holding the lock of a related review record, the processing + // there should be fast enough to avoid this. + // Regardless, we should not let transactions open (and clients waiting) + // for too long. If this sensible timeout + // is exceeded, abort the manifest delete and let the client retry. + // This will bubble up and lead to a 503 + // Service Unavailable response. + ctx, cancel := context.WithTimeout(ctx, manifestDeleteGCLockTimeout) + defer cancel() + + if _, err := l.gcService.ManifestFindAndLockNBefore( + ctx, registry.ID, + ids, time.Now().Add(manifestDeleteGCReviewWindow), + ); err != nil { + return err + } + } + + found, err := l.manifestDao.DeleteManifest(ctx, registry.ID, imageName, d) + if err != nil { + return err + } + if !found { + return util.ErrManifestNotFound + } + + return nil + }, + ) +} diff --git a/registry/app/pkg/docker/registry.go b/registry/app/pkg/docker/registry.go new file mode 100644 index 000000000..b98049090 --- /dev/null +++ b/registry/app/pkg/docker/registry.go @@ -0,0 +1,196 @@ +// Copyright 2023 Harness, Inc. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package docker + +import ( + "context" + "io" + + "github.com/harness/gitness/registry/app/manifest" + "github.com/harness/gitness/registry/app/pkg" + "github.com/harness/gitness/registry/app/pkg/commons" + "github.com/harness/gitness/registry/app/storage" + + v1 "github.com/opencontainers/image-spec/specs-go/v1" +) + +/* +Registry defines the capabilities that an artifact registry should have. 
It should support the following methods:

+// Manifest - GET, HEAD, PUT, DELETE
+// Catalog - GET
+// Tag - GET, DELETE
+// Blob - GET, HEAD, DELETE
+// Blob Upload - GET, HEAD, POST, PATCH, PUT, DELETE
+
+ref: https://github.com/opencontainers/distribution-spec/blob/main/spec.md
+
+Endpoints to support:
+
+| ID      | Method         | API Endpoint                                                  | Success     | Failure
+| ------- | -------------- | ------------------------------------------------------------- | ----------- | -----------
+| end-1   | `GET`          | `/v2/`                                                        | `200`       | `404`/`401`
+| end-2   | `GET` / `HEAD` | `/v2/<name>/blobs/<digest>`                                   | `200`       | `404`
+| end-3   | `GET` / `HEAD` | `/v2/<name>/manifests/<reference>`                            | `200`       | `404`
+| end-4a  | `POST`         | `/v2/<name>/blobs/uploads/`                                   | `202`       | `404`
+| end-4b  | `POST`         | `/v2/<name>/blobs/uploads/?digest=<digest>`                   | `201`/`202` | `404`/`400`
+| end-5   | `PATCH`        | `/v2/<name>/blobs/uploads/<reference>`                        | `202`       | `404`/`416`
+| end-6   | `PUT`          | `/v2/<name>/blobs/uploads/<reference>?digest=<digest>`        | `201`       | `404`/`400`
+| end-7   | `PUT`          | `/v2/<name>/manifests/<reference>`                            | `201`       | `404`
+| end-8a  | `GET`          | `/v2/<name>/tags/list`                                        | `200`       | `404`
+| end-8b  | `GET`          | `/v2/<name>/tags/list?n=<integer>&last=<tagname>`             | `200`       | `404`
+| end-9   | `DELETE`       | `/v2/<name>/manifests/<reference>`                            | `202`       | `404`/`400`/`405`
+| end-10  | `DELETE`       | `/v2/<name>/blobs/<digest>`                                   | `202`       | `404`/`405`
+| end-11  | `POST`         | `/v2/<name>/blobs/uploads/?mount=<digest>&from=<other_name>`  | `201`       | `404`
+| end-12a | `GET`          | `/v2/<name>/referrers/<digest>`                               | `200`       | `404`/`400`
+| end-12b | `GET`          | `/v2/<name>/referrers/<digest>?artifactType=<artifactType>`   | `200`       | `404`/`400`
+| end-13  | `GET`          | `/v2/<name>/blobs/uploads/<reference>`                        | `204`       | `404`
+|.
+*/
+type Registry interface {
+	pkg.Artifact
+
+	// end-1.
+	Base() error
+
+	// end-2 HEAD / GET
+	HeadBlob(
+		ctx2 context.Context,
+		artInfo pkg.RegistryInfo,
+	) (
+		responseHeaders *commons.ResponseHeaders, fr *storage.FileReader, size int64,
+		readCloser io.ReadCloser, redirectURL string, errs []error,
+	)
+	GetBlob(
+		ctx2 context.Context,
+		artInfo pkg.RegistryInfo,
+	) (
+		responseHeaders *commons.ResponseHeaders, fr *storage.FileReader, size int64,
+		readCloser io.ReadCloser, redirectURL string, errs []error,
+	)
+
+	// end-3 HEAD
+	ManifestExist(
+		ctx context.Context,
+		artInfo pkg.RegistryInfo,
+		acceptHeaders []string,
+		ifNoneMatchHeader []string,
+	) (
+		responseHeaders *commons.ResponseHeaders, descriptor manifest.Descriptor, manifest manifest.Manifest,
+		Errors []error,
+	)
+	// end-3 GET
+	PullManifest(
+		ctx context.Context,
+		artInfo pkg.RegistryInfo,
+		acceptHeaders []string,
+		ifNoneMatchHeader []string,
+	) (
+		responseHeaders *commons.ResponseHeaders, descriptor manifest.Descriptor, manifest manifest.Manifest,
+		Errors []error,
+	)
+
+	// end-4a.
+ PushBlobMonolith(ctx context.Context, artInfo pkg.RegistryInfo, size int64, blob io.Reader) error + InitBlobUpload( + ctx context.Context, + artInfo pkg.RegistryInfo, + fromRepo, mountDigest string, + ) (*commons.ResponseHeaders, []error) + // end-4b + PushBlobMonolithWithDigest(ctx context.Context, artInfo pkg.RegistryInfo, size int64, blob io.Reader) error + + // end-5 + PushBlobChunk( + ctx *Context, + artInfo pkg.RegistryInfo, + contentType string, + contentRange string, + contentLength string, + body io.ReadCloser, + contentLengthFromRequest int64, + ) (*commons.ResponseHeaders, []error) + + // end-6 + PushBlob( + ctx2 context.Context, + artInfo pkg.RegistryInfo, + body io.ReadCloser, + contentLength int64, + stateToken string, + ) (*commons.ResponseHeaders, []error) + + // end-7 + PutManifest( + ctx context.Context, + artInfo pkg.RegistryInfo, + mediaType string, + body io.ReadCloser, + length int64, + ) (*commons.ResponseHeaders, []error) + + // end-8a + ListTags( + c context.Context, + lastEntry string, + maxEntries int, + origURL string, + artInfo pkg.RegistryInfo, + ) (*commons.ResponseHeaders, []string, error) + // end-8b + ListFilteredTags( + ctx context.Context, + n int, + last, repository string, + artInfo pkg.RegistryInfo, + ) (tags []string, err error) + + // end-9 + DeleteManifest( + ctx context.Context, + artInfo pkg.RegistryInfo, + ) (errs []error, responseHeaders *commons.ResponseHeaders) + // the "reference" can be "tag" or "digest", the function needs to handle both + + // end-10. + DeleteBlob(ctx *Context, artInfo pkg.RegistryInfo) (responseHeaders *commons.ResponseHeaders, errs []error) + + // end-11. + MountBlob(ctx context.Context, artInfo pkg.RegistryInfo, srcRepository, dstRepository string) (err error) + + // end-12a/12b + ListReferrers( + ctx context.Context, + artInfo pkg.RegistryInfo, + artifactType string, + ) (index *v1.Index, responseHeaders *commons.ResponseHeaders, err error) + + // end-13. + GetBlobUploadStatus(ctx *Context, artInfo pkg.RegistryInfo, stateToken string) (*commons.ResponseHeaders, []error) + + // Catalog GET. + GetCatalog() (repositories []string, err error) + // Tag DELETE. + DeleteTag(repository, tag string, artInfo pkg.RegistryInfo) error + // Blob chunk PULL + PullBlobChunk( + repository, digest string, + blobSize, start, end int64, + artInfo pkg.RegistryInfo, + ) (size int64, blob io.ReadCloser, err error) + // Mount check + CanBeMount() (mount bool, repository string, err error) +} diff --git a/registry/app/pkg/docker/remote.go b/registry/app/pkg/docker/remote.go new file mode 100644 index 000000000..6a52bfaa1 --- /dev/null +++ b/registry/app/pkg/docker/remote.go @@ -0,0 +1,589 @@ +// Source: https://github.com/goharbor/harbor + +// Copyright 2016 Project Harbor Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
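The Registry interface above maps one-to-one onto the OCI distribution spec endpoints listed in the table. For orientation, the following is a minimal, self-contained sketch of a client-side probe for end-1 (the API version check); the base URL is a placeholder and the helper is illustrative rather than part of this change set.

	package main

	import (
		"fmt"
		"net/http"
	)

	// checkV2 issues end-1 (`GET /v2/`); a 200 response means the server
	// implements the distribution API and the caller is authorized.
	func checkV2(base string) error {
		resp, err := http.Get(base + "/v2/")
		if err != nil {
			return err
		}
		defer resp.Body.Close()
		if resp.StatusCode != http.StatusOK {
			return fmt.Errorf("unexpected status for /v2/: %s", resp.Status)
		}
		return nil
	}

	func main() {
		// Placeholder endpoint; any spec-compliant registry responds to end-1.
		if err := checkV2("http://localhost:3000"); err != nil {
			fmt.Println("registry probe failed:", err)
		}
	}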
+
+package docker
+
+import (
+	"context"
+	"fmt"
+	"io"
+	"net/http"
+	"time"
+
+	"github.com/harness/gitness/app/api/request"
+	store2 "github.com/harness/gitness/app/store"
+	"github.com/harness/gitness/encrypt"
+	"github.com/harness/gitness/registry/app/common/lib/errors"
+	"github.com/harness/gitness/registry/app/manifest"
+	"github.com/harness/gitness/registry/app/pkg"
+	"github.com/harness/gitness/registry/app/pkg/commons"
+	proxy2 "github.com/harness/gitness/registry/app/remote/controller/proxy"
+	"github.com/harness/gitness/registry/app/storage"
+	"github.com/harness/gitness/registry/app/store"
+
+	v1 "github.com/opencontainers/image-spec/specs-go/v1"
+	"github.com/rs/zerolog/log"
+)
+
+const (
+	contentLength       = "Content-Length"
+	contentType         = "Content-Type"
+	dockerContentDigest = "Docker-Content-Digest"
+	etag                = "Etag"
+	ensureTagInterval   = 10 * time.Second
+	ensureTagMaxRetry   = 60
+)
+
+func NewRemoteRegistry(
+	local *LocalRegistry,
+	app *App,
+	upstreamProxyConfigRepo store.UpstreamProxyConfigRepository,
+	secretStore store2.SecretStore,
+	encrypter encrypt.Encrypter,
+) Registry {
+	return &RemoteRegistry{
+		local:                   local,
+		App:                     app,
+		upstreamProxyConfigRepo: upstreamProxyConfigRepo,
+		secretStore:             secretStore,
+		encrypter:               encrypter,
+	}
+}
+
+func (r *RemoteRegistry) GetArtifactType() string {
+	return "Remote Registry"
+}
+
+type RemoteRegistry struct {
+	local                   *LocalRegistry
+	App                     *App
+	upstreamProxyConfigRepo store.UpstreamProxyConfigRepository
+	secretStore             store2.SecretStore
+	encrypter               encrypt.Encrypter
+}
+
+func (r *RemoteRegistry) Base() error {
+	panic("Not implemented yet, will be done during Replication flows")
+}
+
+func defaultLibrary() (bool, string, error) {
+	// Get the upstream repository and check if the path contains the "library" prefix.
+	// If yes, redirect to the correct path without the prefix.
+	return false, "", nil
+}
+
+// defaultManifestURL returns the real URL for a manifest request against the default project.
+func defaultManifestURL(regIdentifier string, name string, a pkg.RegistryInfo) string {
+	return fmt.Sprintf("/v2/%s/library/%s/manifests/%s", regIdentifier, name, a.Reference)
+}
+
+func proxyManifestHead(
+	ctx context.Context,
+	responseHeaders *commons.ResponseHeaders,
+	ctl proxy2.Controller,
+	art pkg.RegistryInfo,
+	remote proxy2.RemoteInterface,
+	info pkg.RegistryInfo,
+	acceptHeaders []string,
+	ifNoneMatchHeader []string,
+) error {
+	// Remote call.
+	exist, desc, err := ctl.HeadManifest(ctx, art, remote)
+	if err != nil {
+		return err
+	}
+	if !exist || desc == nil {
+		return errors.NotFoundError(fmt.Errorf("the tag %v:%v is not found", art.Image, art.Tag))
+	}
+
+	if len(art.Tag) > 0 {
+		go func(art pkg.RegistryInfo) {
+			// Update local storage in the background.
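+			// With ensureTagInterval (10s) and ensureTagMaxRetry (60), this
+			// background loop keeps retrying for roughly ten minutes before
+			// giving up.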
+ session, _ := request.AuthSessionFrom(ctx) + ctx2 := request.WithAuthSession(context.Background(), session) + tag := art.Tag + art.Tag = "" + art.Digest = desc.Digest.String() + + var count = 0 + for i := 0; i < ensureTagMaxRetry; i++ { + time.Sleep(ensureTagInterval) + count++ + log.Ctx(ctx).Info().Msgf("Ensure tag: %s for image: %s, retry: %d", tag, info.Image, count) + e := ctl.EnsureTag(ctx2, responseHeaders, art, acceptHeaders, ifNoneMatchHeader) + if e != nil { + log.Ctx(ctx).Warn().Err(e).Msgf("Failed to update tag: ") + } else { + log.Ctx(ctx).Info().Msgf("Tag updated: %s for image: %s", tag, info.Image) + return + } + } + }(art) + } + + responseHeaders.Headers[contentLength] = fmt.Sprintf("%v", desc.Size) + responseHeaders.Headers[contentType] = desc.MediaType + responseHeaders.Headers[dockerContentDigest] = string(desc.Digest) + responseHeaders.Headers[etag] = string(desc.Digest) + return nil +} + +func (r *RemoteRegistry) ManifestExist( + ctx context.Context, + artInfo pkg.RegistryInfo, + acceptHeaders []string, + ifNoneMatchHeader []string, +) ( + responseHeaders *commons.ResponseHeaders, descriptor manifest.Descriptor, manifestResult manifest.Manifest, + errs []error, +) { + proxyCtl := proxy2.ControllerInstance(r.local, r.local.ms) + responseHeaders = &commons.ResponseHeaders{ + Headers: make(map[string]string), + } + defaultProj, name, err := defaultLibrary() + if err != nil { + errs = append(errs, err) + return responseHeaders, descriptor, manifestResult, errs + } + registryInfo := artInfo + if defaultProj { + responseHeaders.Code = http.StatusMovedPermanently + responseHeaders.Headers = map[string]string{ + "Location": defaultManifestURL(artInfo.RegIdentifier, name, registryInfo), + } + return responseHeaders, descriptor, manifestResult, errs + } + + if !canProxy() { + errs = append(errs, errors.New("Proxy is down")) + return responseHeaders, descriptor, manifestResult, errs + } + + upstreamProxy, err := r.upstreamProxyConfigRepo.GetByRegistryIdentifier( + ctx, artInfo.ParentID, artInfo.RegIdentifier, + ) + if err != nil { + errs = append(errs, err) + return responseHeaders, descriptor, manifestResult, errs + } + remoteHelper, err := proxy2.NewRemoteHelper(ctx, r.secretStore, r.encrypter, artInfo.RegIdentifier, *upstreamProxy) + if err != nil { + errs = append(errs, errors.New("Proxy is down")) + return responseHeaders, descriptor, manifestResult, errs + } + useLocal, man, err := proxyCtl.UseLocalManifest(ctx, registryInfo, remoteHelper, acceptHeaders, ifNoneMatchHeader) + + if err != nil { + errs = append(errs, err) + return responseHeaders, descriptor, manifestResult, errs + } + + if useLocal { + if man != nil { + responseHeaders.Headers[contentLength] = fmt.Sprintf("%v", len(man.Content)) + responseHeaders.Headers[contentType] = man.ContentType + responseHeaders.Headers[dockerContentDigest] = man.Digest + responseHeaders.Headers[etag] = man.Digest + manifestResult, descriptor, err = manifest.UnmarshalManifest(man.ContentType, man.Content) + if err != nil { + errs = append(errs, err) + return responseHeaders, descriptor, manifestResult, errs + } + return responseHeaders, descriptor, manifestResult, errs + } + errs = append(errs, errors.New("Not found")) + } + + log.Ctx(ctx).Debug().Msgf("the tag is %s, digest is %s", registryInfo.Tag, registryInfo.Digest) + err = proxyManifestHead( + ctx, + responseHeaders, + proxyCtl, + registryInfo, + remoteHelper, + artInfo, + acceptHeaders, + ifNoneMatchHeader, + ) + + if err != nil { + errs = append(errs, err) + 
log.Ctx(ctx).Warn().Msgf( + "Proxy to remote failed, fallback to local registry: %s", + err.Error(), + ) + } + return responseHeaders, descriptor, manifestResult, errs +} + +func (r *RemoteRegistry) PullManifest( + ctx context.Context, + artInfo pkg.RegistryInfo, + acceptHeaders []string, + ifNoneMatchHeader []string, +) ( + responseHeaders *commons.ResponseHeaders, descriptor manifest.Descriptor, manifestResult manifest.Manifest, + errs []error, +) { + proxyCtl := proxy2.ControllerInstance(r.local, r.local.ms) + responseHeaders = &commons.ResponseHeaders{ + Headers: make(map[string]string), + } + defaultProj, name, err := defaultLibrary() + if err != nil { + errs = append(errs, err) + return responseHeaders, descriptor, manifestResult, errs + } + registryInfo := artInfo + if defaultProj { + responseHeaders.Code = http.StatusMovedPermanently + responseHeaders.Headers = map[string]string{ + "Location": defaultManifestURL(artInfo.RegIdentifier, name, registryInfo), + } + return responseHeaders, descriptor, manifestResult, errs + } + + if !canProxy() { + errs = append(errs, errors.New("Proxy is down")) + return responseHeaders, descriptor, manifestResult, errs + } + upstreamProxy, err := r.upstreamProxyConfigRepo.GetByRegistryIdentifier( + ctx, artInfo.ParentID, artInfo.RegIdentifier, + ) + if err != nil { + errs = append(errs, err) + return responseHeaders, descriptor, manifestResult, errs + } + remoteHelper, err := proxy2.NewRemoteHelper(ctx, r.secretStore, r.encrypter, artInfo.RegIdentifier, *upstreamProxy) + if err != nil { + errs = append(errs, errors.New("Proxy is down")) + return responseHeaders, descriptor, manifestResult, errs + } + useLocal, man, err := proxyCtl.UseLocalManifest(ctx, registryInfo, remoteHelper, acceptHeaders, ifNoneMatchHeader) + + if err != nil { + errs = append(errs, err) + return responseHeaders, descriptor, manifestResult, errs + } + + if useLocal { + if man != nil { + responseHeaders.Headers[contentLength] = fmt.Sprintf("%v", len(man.Content)) + responseHeaders.Headers[contentType] = man.ContentType + responseHeaders.Headers[dockerContentDigest] = man.Digest + responseHeaders.Headers[etag] = man.Digest + manifestResult, descriptor, err = manifest.UnmarshalManifest(man.ContentType, man.Content) + if err != nil { + errs = append(errs, err) + return responseHeaders, descriptor, manifestResult, errs + } + return responseHeaders, descriptor, manifestResult, errs + } + errs = append(errs, errors.New("Not found")) + } + + log.Ctx(ctx).Debug().Msgf("the tag is %s, digest is %s", registryInfo.Tag, registryInfo.Digest) + log.Ctx(ctx).Warn(). 
+ Msgf( + "Artifact: %s:%v, digest:%v is not found in proxy cache, fetch it from remote registry", + artInfo.RegIdentifier, registryInfo.Tag, registryInfo.Digest, + ) + manifestResult, err = proxyManifestGet( + ctx, + responseHeaders, + proxyCtl, + registryInfo, + remoteHelper, + artInfo.RegIdentifier, + artInfo.Image, + acceptHeaders, + ifNoneMatchHeader, + ) + if err != nil { + errs = append(errs, err) + log.Ctx(ctx).Warn().Msgf("Proxy to remote failed, fallback to local registry: %s", err.Error()) + } + return responseHeaders, descriptor, manifestResult, errs +} + +func (r *RemoteRegistry) HeadBlob( + ctx2 context.Context, + artInfo pkg.RegistryInfo, +) ( + responseHeaders *commons.ResponseHeaders, fr *storage.FileReader, size int64, + readCloser io.ReadCloser, redirectURL string, errs []error, +) { + return r.fetchBlobInternal(ctx2, artInfo.RegIdentifier, http.MethodHead, artInfo) +} + +// TODO (Arvind): There is a known issue where if the remote itself +// is a proxy, then the first pull will fail with error: `error pulling image configuration: +// image config verification failed for digest` and the second pull will succeed. This is a +// known issue and is being worked on. The workaround is to pull the image twice. +func (r *RemoteRegistry) GetBlob( + ctx2 context.Context, + artInfo pkg.RegistryInfo, +) ( + responseHeaders *commons.ResponseHeaders, fr *storage.FileReader, size int64, readCloser io.ReadCloser, + redirectURL string, errs []error, +) { + return r.fetchBlobInternal(ctx2, artInfo.RegIdentifier, http.MethodGet, artInfo) +} + +func (r *RemoteRegistry) fetchBlobInternal( + ctx context.Context, + repoKey string, + method string, + info pkg.RegistryInfo, +) ( + responseHeaders *commons.ResponseHeaders, fr *storage.FileReader, size int64, readCloser io.ReadCloser, + redirectURL string, errs []error, +) { + proxyCtl := proxy2.ControllerInstance(r.local, r.local.ms) + responseHeaders = &commons.ResponseHeaders{ + Headers: make(map[string]string), + } + + log.Ctx(ctx).Info().Msgf("Proxy: %s", repoKey) + + // Handle dockerhub request without library prefix. + isDefault, name, err := defaultLibrary() + if err != nil { + errs = append(errs, err) + return responseHeaders, fr, size, readCloser, redirectURL, errs + } + registryInfo := info + if isDefault { + responseHeaders.Code = http.StatusMovedPermanently + responseHeaders.Headers = map[string]string{ + "Location": defaultManifestURL(repoKey, name, registryInfo), + } + return responseHeaders, fr, size, readCloser, redirectURL, errs + } + + if !canProxy() { + errs = append(errs, errors.New("Blob not found")) + } + + if proxyCtl.UseLocalBlob(ctx, registryInfo) { + switch method { + case http.MethodGet: + headers, reader, s, closer, url, e := r.local.GetBlob(ctx, info) + return headers, reader, s, closer, url, e + case http.MethodHead: + headers, reader, s, closer, url, e := r.local.HeadBlob(ctx, info) + return headers, reader, s, closer, url, e + default: + errs = append(errs, errors.New("Method not supported")) + return responseHeaders, fr, size, readCloser, redirectURL, errs + } + } + + upstreamProxy, err := r.upstreamProxyConfigRepo.GetByRegistryIdentifier(ctx, info.ParentID, repoKey) + if err != nil { + errs = append(errs, err) + } + + // This is start of proxy Code. 
+	if err != nil {
+		// Without a loaded upstream proxy config, the dereference below would
+		// panic; return the accumulated errors instead.
+		return responseHeaders, fr, size, readCloser, redirectURL, errs
+	}
+	size, readCloser, err = proxyCtl.ProxyBlob(ctx, r.secretStore, r.encrypter, registryInfo, repoKey, *upstreamProxy)
+	if err != nil {
+		errs = append(errs, err)
+		return responseHeaders, fr, size, readCloser, redirectURL, errs
+	}
+	setHeaders(responseHeaders, size, "", registryInfo.Digest)
+	return responseHeaders, fr, size, readCloser, redirectURL, errs
+}
+
+func proxyManifestGet(
+	ctx context.Context,
+	responseHeaders *commons.ResponseHeaders,
+	ctl proxy2.Controller,
+	registryInfo pkg.RegistryInfo,
+	remote proxy2.RemoteInterface,
+	repoKey string,
+	imageName string,
+	acceptHeader []string,
+	ifNoneMatchHeader []string,
+) (man manifest.Manifest, err error) {
+	man, err = ctl.ProxyManifest(ctx, registryInfo, remote, repoKey, imageName, acceptHeader, ifNoneMatchHeader)
+	if err != nil {
+		return
+	}
+	ct, payload, err := man.Payload()
+	if err != nil {
+		return
+	}
+	setHeaders(responseHeaders, int64(len(payload)), ct, registryInfo.Digest)
+	return
+}
+
+func setHeaders(
+	responseHeaders *commons.ResponseHeaders, size int64,
+	mediaType string, dig string,
+) {
+	responseHeaders.Headers[contentLength] = fmt.Sprintf("%v", size)
+	if len(mediaType) > 0 {
+		responseHeaders.Headers[contentType] = mediaType
+	}
+	responseHeaders.Headers[dockerContentDigest] = dig
+	responseHeaders.Headers[etag] = dig
+}
+
+func canProxy() bool {
+	// TODO Health check.
+	return true
+}
+
+func (r *RemoteRegistry) PushBlobMonolith(
+	_ context.Context,
+	_ pkg.RegistryInfo,
+	_ int64,
+	_ io.Reader,
+) error {
+	panic("Not implemented yet, will be done during Replication flows")
+}
+
+func (r *RemoteRegistry) InitBlobUpload(
+	_ context.Context,
+	_ pkg.RegistryInfo,
+	_, _ string,
+) (*commons.ResponseHeaders, []error) {
+	panic("Not implemented yet, will be done during Replication flows")
+}
+
+func (r *RemoteRegistry) PushBlobMonolithWithDigest(
+	_ context.Context,
+	_ pkg.RegistryInfo,
+	_ int64,
+	_ io.Reader,
+) error {
+	panic("Not implemented yet, will be done during Replication flows")
+}
+
+func (r *RemoteRegistry) PushBlobChunk(
+	_ *Context,
+	_ pkg.RegistryInfo,
+	_ string,
+	_ string,
+	_ string,
+	_ io.ReadCloser,
+	_ int64,
+) (*commons.ResponseHeaders, []error) {
+	panic("Not implemented yet, will be done during Replication flows")
+}
+
+func (r *RemoteRegistry) PushBlob(
+	_ context.Context,
+	_ pkg.RegistryInfo,
+	_ io.ReadCloser,
+	_ int64,
+	_ string,
+) (*commons.ResponseHeaders, []error) {
+	panic("Not implemented yet, will be done during Replication flows")
+}
+
+func (r *RemoteRegistry) PutManifest(
+	_ context.Context,
+	_ pkg.RegistryInfo,
+	_ string,
+	_ io.ReadCloser,
+	_ int64,
+) (*commons.ResponseHeaders, []error) {
+	panic("Not implemented yet, will be done during Replication flows")
+}
+
+func (r *RemoteRegistry) ListTags(
+	_ context.Context,
+	_ string,
+	_ int,
+	_ string,
+	_ pkg.RegistryInfo,
+) (*commons.ResponseHeaders, []string, error) {
+	panic("Not implemented yet, will be done during Replication flows")
+}
+
+func (r *RemoteRegistry) ListFilteredTags(
+	_ context.Context,
+	_ int,
+	_, _ string,
+	_ pkg.RegistryInfo,
+) (tags []string, err error) {
+	panic("Not implemented yet, will be done during Replication flows")
+}
+
+func (r *RemoteRegistry) DeleteManifest(
+	_ context.Context,
+	_ pkg.RegistryInfo,
+) (errs []error, responseHeaders *commons.ResponseHeaders) {
+	panic("Not implemented yet, will be done during Replication flows")
+}
+
+func (r *RemoteRegistry) DeleteBlob(
+	_ *Context,
+	_ pkg.RegistryInfo,
+) (responseHeaders
*commons.ResponseHeaders, errs []error) { + panic("Not implemented yet, will be done during Replication flows") +} + +func (r *RemoteRegistry) MountBlob( + _ context.Context, + _ pkg.RegistryInfo, + _, _ string, +) (err error) { + panic("Not implemented yet, will be done during Replication flows") +} + +func (r *RemoteRegistry) ListReferrers( + _ context.Context, + _ pkg.RegistryInfo, + _ string, +) (index *v1.Index, responseHeaders *commons.ResponseHeaders, err error) { + panic("Not implemented yet, will be done during Replication flows") +} + +func (r *RemoteRegistry) GetBlobUploadStatus( + _ *Context, + _ pkg.RegistryInfo, + _ string, +) (*commons.ResponseHeaders, []error) { + panic("Not implemented yet, will be done during Replication flows") +} + +func (r *RemoteRegistry) GetCatalog() (repositories []string, err error) { + panic("Not implemented yet, will be done during Replication flows") +} + +func (r *RemoteRegistry) DeleteTag( + _, _ string, + _ pkg.RegistryInfo, +) error { + panic("Not implemented yet, will be done during Replication flows") +} + +func (r *RemoteRegistry) PullBlobChunk( + _, _ string, + _, _, _ int64, + _ pkg.RegistryInfo, +) (size int64, blob io.ReadCloser, err error) { + panic( + "Not implemented yet, will be done during Replication flows", + ) +} + +func (r *RemoteRegistry) CanBeMount() (mount bool, repository string, err error) { + panic("Not implemented yet, will be done during Replication flows") +} diff --git a/registry/app/pkg/docker/response.go b/registry/app/pkg/docker/response.go new file mode 100644 index 000000000..12f1eaacc --- /dev/null +++ b/registry/app/pkg/docker/response.go @@ -0,0 +1,71 @@ +// Copyright 2023 Harness, Inc. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+ +package docker + +import ( + "io" + + "github.com/harness/gitness/registry/app/manifest" + "github.com/harness/gitness/registry/app/pkg/commons" + "github.com/harness/gitness/registry/app/storage" +) + +type Response interface { + GetErrors() []error +} + +var _ Response = (*GetManifestResponse)(nil) +var _ Response = (*PutManifestResponse)(nil) +var _ Response = (*DeleteManifestResponse)(nil) + +type GetManifestResponse struct { + Errors []error + ResponseHeaders *commons.ResponseHeaders + descriptor manifest.Descriptor + Manifest manifest.Manifest +} + +func (r *GetManifestResponse) GetErrors() []error { + return r.Errors +} + +type PutManifestResponse struct { + Errors []error +} + +func (r *PutManifestResponse) GetErrors() []error { + return r.Errors +} + +type DeleteManifestResponse struct { + Errors []error +} + +func (r *DeleteManifestResponse) GetErrors() []error { + return r.Errors +} + +type GetBlobResponse struct { + Errors []error + ResponseHeaders *commons.ResponseHeaders + Body *storage.FileReader + Size int64 + ReadCloser io.ReadCloser + RedirectURL string +} + +func (r *GetBlobResponse) GetErrors() []error { + return r.Errors +} diff --git a/registry/app/pkg/docker/wire.go b/registry/app/pkg/docker/wire.go new file mode 100644 index 000000000..1a1a94e97 --- /dev/null +++ b/registry/app/pkg/docker/wire.go @@ -0,0 +1,84 @@ +// Copyright 2023 Harness, Inc. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
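Every concrete response type above satisfies Response via GetErrors, so handler code can funnel error reporting through one code path. A minimal sketch, assuming `fmt` is imported; the `report` helper is hypothetical and not part of this change:

	// report drains the accumulated errors from any registry response; it
	// works uniformly for manifest and blob responses because they all
	// implement the Response interface.
	func report(resp Response) bool {
		for _, err := range resp.GetErrors() {
			fmt.Println("registry operation failed:", err)
		}
		return len(resp.GetErrors()) == 0
	}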
+ +package docker + +import ( + "github.com/harness/gitness/app/auth/authz" + corestore "github.com/harness/gitness/app/store" + "github.com/harness/gitness/encrypt" + storagedriver "github.com/harness/gitness/registry/app/driver" + "github.com/harness/gitness/registry/app/pkg" + "github.com/harness/gitness/registry/app/storage" + "github.com/harness/gitness/registry/app/store" + "github.com/harness/gitness/registry/gc" + "github.com/harness/gitness/store/database/dbtx" + "github.com/harness/gitness/types" + + "github.com/google/wire" +) + +func LocalRegistryProvider( + app *App, ms ManifestService, blobRepo store.BlobRepository, + registryDao store.RegistryRepository, manifestDao store.ManifestRepository, + registryBlobDao store.RegistryBlobRepository, + mtRepository store.MediaTypesRepository, + tagDao store.TagRepository, artifactDao store.ArtifactRepository, artifactStatDao store.ArtifactStatRepository, + gcService gc.Service, tx dbtx.Transactor, +) *LocalRegistry { + return NewLocalRegistry( + app, ms, manifestDao, registryDao, registryBlobDao, blobRepo, + mtRepository, tagDao, artifactDao, artifactStatDao, gcService, tx, + ).(*LocalRegistry) +} + +func ManifestServiceProvider( + registryDao store.RegistryRepository, + manifestDao store.ManifestRepository, blobRepo store.BlobRepository, mtRepository store.MediaTypesRepository, + manifestRefDao store.ManifestReferenceRepository, tagDao store.TagRepository, artifactDao store.ArtifactRepository, + artifactStatDao store.ArtifactStatRepository, layerDao store.LayerRepository, + gcService gc.Service, tx dbtx.Transactor, +) ManifestService { + return NewManifestService( + registryDao, manifestDao, blobRepo, mtRepository, tagDao, + artifactDao, artifactStatDao, layerDao, manifestRefDao, tx, gcService, + ) +} + +func RemoteRegistryProvider( + local *LocalRegistry, app *App, upstreamProxyConfigRepo store.UpstreamProxyConfigRepository, + secretStore corestore.SecretStore, encrypter encrypt.Encrypter, +) *RemoteRegistry { + return NewRemoteRegistry(local, app, upstreamProxyConfigRepo, secretStore, encrypter).(*RemoteRegistry) +} + +func ControllerProvider( + local *LocalRegistry, + remote *RemoteRegistry, + controller *pkg.CoreController, + spaceStore corestore.SpaceStore, + authorizer authz.Authorizer, +) *Controller { + return NewController(local, remote, controller, spaceStore, authorizer) +} + +func StorageServiceProvider(cfg *types.Config, driver storagedriver.StorageDriver) *storage.Service { + return GetStorageService(cfg, driver) +} + +var ControllerSet = wire.NewSet(ControllerProvider) +var RegistrySet = wire.NewSet(LocalRegistryProvider, ManifestServiceProvider, RemoteRegistryProvider) +var StorageServiceSet = wire.NewSet(StorageServiceProvider) +var AppSet = wire.NewSet(NewApp) +var WireSet = wire.NewSet(ControllerSet, RegistrySet, StorageServiceSet, AppSet, gc.WireSet) diff --git a/registry/app/pkg/wire.go b/registry/app/pkg/wire.go new file mode 100644 index 000000000..73dca54c6 --- /dev/null +++ b/registry/app/pkg/wire.go @@ -0,0 +1,27 @@ +// Copyright 2023 Harness, Inc. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+// See the License for the specific language governing permissions and +// limitations under the License. + +package pkg + +import ( + "github.com/harness/gitness/registry/app/store" + + "github.com/google/wire" +) + +func CoreControllerProvider(registryDao store.RegistryRepository) *CoreController { + return NewCoreController(registryDao) +} + +var WireSet = wire.NewSet(CoreControllerProvider) diff --git a/registry/app/remote/adapter/adapter.go b/registry/app/remote/adapter/adapter.go new file mode 100644 index 000000000..cdd412461 --- /dev/null +++ b/registry/app/remote/adapter/adapter.go @@ -0,0 +1,112 @@ +// Source: https://github.com/goharbor/harbor + +// Copyright 2016 Project Harbor Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package adapter + +import ( + "context" + "errors" + "fmt" + "io" + + store2 "github.com/harness/gitness/app/store" + "github.com/harness/gitness/encrypt" + "github.com/harness/gitness/registry/app/manifest" + "github.com/harness/gitness/registry/types" +) + +// const definition. +const ( + MaxConcurrency = 100 +) + +var registry = map[string]Factory{} +var registryKeys = []string{} + +// Factory creates a specific Adapter according to the params. +type Factory interface { + Create( + ctx context.Context, secretStore store2.SecretStore, encrypter encrypt.Encrypter, + record types.UpstreamProxy, + ) (Adapter, error) +} + +// Adapter interface defines the capabilities of registry. +type Adapter interface { + // HealthCheck checks health status of registry. + HealthCheck() (string, error) +} + +// ArtifactRegistry defines the capabilities that an artifact registry should have. 
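+// A generic implementation lives in registry/app/remote/adapter/native.Adapter,
+// which embeds the registry HTTP client; registry-specific adapters (e.g.
+// dockerhub) embed that adapter and override only what they need.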
+type ArtifactRegistry interface {
+    ManifestExist(repository, reference string) (exist bool, desc *manifest.Descriptor, err error)
+    PullManifest(
+        repository, reference string,
+        acceptedMediaTypes ...string,
+    ) (manifest manifest.Manifest, digest string, err error)
+    PushManifest(repository, reference, mediaType string, payload []byte) (string, error)
+    DeleteManifest(
+        repository, reference string,
+    ) error // the "reference" can be a "tag" or a "digest"; the function needs to handle both
+    BlobExist(repository, digest string) (exist bool, err error)
+    PullBlob(repository, digest string) (size int64, blob io.ReadCloser, err error)
+    PullBlobChunk(repository, digest string, blobSize, start, end int64) (size int64, blob io.ReadCloser, err error)
+    PushBlobChunk(
+        repository, digest string,
+        size int64,
+        chunk io.Reader,
+        start, end int64,
+        location string,
+    ) (nextUploadLocation string, endRange int64, err error)
+    PushBlob(repository, digest string, size int64, blob io.Reader) error
+    MountBlob(srcRepository, digest, dstRepository string) (err error)
+    CanBeMount(
+        digest string,
+    ) (mount bool, repository string, err error) // check whether the blob can be mounted from the remote registry
+    DeleteTag(repository, tag string) error
+    ListTags(repository string) (tags []string, err error)
+}
+
+// RegisterFactory registers an adapter factory with the registry.
+func RegisterFactory(t string, factory Factory) error {
+    if len(t) == 0 {
+        return errors.New("invalid type")
+    }
+    if factory == nil {
+        return errors.New("empty adapter factory")
+    }
+
+    if _, exist := registry[t]; exist {
+        return fmt.Errorf("adapter factory for %s already exists", t)
+    }
+    registry[t] = factory
+    registryKeys = append(registryKeys, t)
+    return nil
+}
+
+// GetFactory gets the adapter factory by the specified name.
+func GetFactory(t string) (Factory, error) {
+    factory, exist := registry[t]
+    if !exist {
+        return nil, fmt.Errorf("adapter factory for %s not found", t)
+    }
+    return factory, nil
+}
+
+// ListRegisteredAdapterTypes lists the registered Adapter types.
+func ListRegisteredAdapterTypes() []string {
+    return registryKeys
+}
diff --git a/registry/app/remote/adapter/dockerhub/adapter.go b/registry/app/remote/adapter/dockerhub/adapter.go
new file mode 100644
index 000000000..861d1abb5
--- /dev/null
+++ b/registry/app/remote/adapter/dockerhub/adapter.go
@@ -0,0 +1,82 @@
+// Source: https://github.com/goharbor/harbor
+
+// Copyright 2016 Project Harbor Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
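+//
+// The factory registry defined in registry/app/remote/adapter is consumed
+// roughly as follows (an illustrative sketch only; error handling elided):
+//
+//    factory, err := adapter.GetFactory("docker")
+//    adp, err := factory.Create(ctx, secretStore, encrypter, upstreamProxy)
+//    reg, ok := adp.(adapter.ArtifactRegistry) // optional capability check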
+ +package dockerhub + +import ( + "context" + + store2 "github.com/harness/gitness/app/store" + "github.com/harness/gitness/encrypt" + adp "github.com/harness/gitness/registry/app/remote/adapter" + "github.com/harness/gitness/registry/app/remote/adapter/native" + "github.com/harness/gitness/registry/types" + + "github.com/rs/zerolog/log" +) + +func init() { + adapterType := "docker" + if err := adp.RegisterFactory(adapterType, new(factory)); err != nil { + log.Error().Stack().Err(err).Msgf("Register adapter factory for %s", adapterType) + return + } + log.Info().Msgf("Factory for adapter %s registered", adapterType) +} + +func newAdapter( + ctx context.Context, + secretStore store2.SecretStore, + encrypter encrypt.Encrypter, + registry types.UpstreamProxy, +) (adp.Adapter, error) { + client, err := NewClient(registry) + if err != nil { + return nil, err + } + + // TODO: get Upstream Credentials + return &adapter{ + client: client, + Adapter: native.NewAdapter(ctx, secretStore, encrypter, registry), + }, nil +} + +type factory struct { +} + +// Create ... +func (f *factory) Create( + ctx context.Context, + secretStore store2.SecretStore, + encrypter encrypt.Encrypter, + record types.UpstreamProxy, +) (adp.Adapter, error) { + return newAdapter(ctx, secretStore, encrypter, record) +} + +var ( + _ adp.Adapter = (*adapter)(nil) + _ adp.ArtifactRegistry = (*adapter)(nil) +) + +type adapter struct { + *native.Adapter + client *Client +} + +// Ensure '*adapter' implements interface 'Adapter'. +var _ adp.Adapter = (*adapter)(nil) diff --git a/registry/app/remote/adapter/dockerhub/client.go b/registry/app/remote/adapter/dockerhub/client.go new file mode 100644 index 000000000..2f0ef0230 --- /dev/null +++ b/registry/app/remote/adapter/dockerhub/client.go @@ -0,0 +1,64 @@ +// Source: https://github.com/goharbor/harbor + +// Copyright 2016 Project Harbor Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package dockerhub + +import ( + "context" + "fmt" + "io" + "net/http" + + commonhttp "github.com/harness/gitness/registry/app/common/http" + "github.com/harness/gitness/registry/types" + + "github.com/rs/zerolog/log" +) + +// Client is a client to interact with DockerHub. +type Client struct { + client *http.Client + token string + host string + // credential LoginCredential +} + +// NewClient creates a new DockerHub client. +func NewClient(_ types.UpstreamProxy) (*Client, error) { + client := &Client{ + host: registryURL, + client: &http.Client{ + Transport: commonhttp.GetHTTPTransport(commonhttp.WithInsecure(true)), + }, + } + + return client, nil +} + +// Do performs http request to DockerHub, it will set token automatically. 
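+// An illustrative call, using one of the path constants from consts.go:
+//
+//    resp, err := c.Do(http.MethodGet, listNamespacePath, nil)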
+func (c *Client) Do(method, path string, body io.Reader) (*http.Response, error) { + url := baseURL + path + log.Info().Msgf("%s %s", method, url) + req, err := http.NewRequestWithContext(context.TODO(), method, url, body) + if err != nil { + return nil, err + } + if body != nil || method == http.MethodPost || method == http.MethodPut { + req.Header.Set("Content-Type", "application/json") + } + req.Header.Set("Authorization", fmt.Sprintf("%s %s", "Bearer", c.token)) + return c.client.Do(req) +} diff --git a/registry/app/remote/adapter/dockerhub/consts.go b/registry/app/remote/adapter/dockerhub/consts.go new file mode 100644 index 000000000..33209196d --- /dev/null +++ b/registry/app/remote/adapter/dockerhub/consts.go @@ -0,0 +1,45 @@ +// Source: https://github.com/goharbor/harbor + +// Copyright 2016 Project Harbor Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package dockerhub + +const ( + baseURL = "https://hub.docker.com" + registryURL = "https://registry-1.docker.io" + loginPath = "/v2/users/login/" + listNamespacePath = "/v2/repositories/namespaces" + createNamespacePath = "/v2/orgs/" +) + +// func getNamespacePath(namespace string) string { +// return fmt.Sprintf("/v2/orgs/%s/", namespace) +// } + +// func listReposPath(namespace, name string, page, pageSize int) string { +// if len(name) == 0 { +// return fmt.Sprintf("/v2/repositories/%s/?page=%d&page_size=%d", namespace, page, pageSize) +// } + +// return fmt.Sprintf("/v2/repositories/%s/?name=%s&page=%d&page_size=%d", namespace, name, page, pageSize) +// } + +// func listTagsPath(namespace, registry string, page, pageSize int) string { +// return fmt.Sprintf("/v2/repositories/%s/%s/tags/?page=%d&page_size=%d", namespace, registry, page, pageSize) +// } + +// func deleteTagPath(namespace, registry, tag string) string { +// return fmt.Sprintf("/v2/repositories/%s/%s/tags/%s/", namespace, registry, tag) +// } diff --git a/registry/app/remote/adapter/dockerhub/types.go b/registry/app/remote/adapter/dockerhub/types.go new file mode 100644 index 000000000..bfb51aa14 --- /dev/null +++ b/registry/app/remote/adapter/dockerhub/types.go @@ -0,0 +1,32 @@ +// Copyright 2023 Harness, Inc. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package dockerhub + +// LoginCredential is request to login. +type LoginCredential struct { + User string `json:"username"` + Password string `json:"password"` +} + +// TokenResp is response of login. 
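+// A successful login response can be decoded into it, e.g. (illustrative
+// sketch; the login flow itself is not wired up here):
+//
+//    var t TokenResp
+//    err := json.NewDecoder(resp.Body).Decode(&t) // t.Token carries the bearer token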
+type TokenResp struct {
+    Token string `json:"token"`
+}
+
+// NamespacesResp is the namespace list returned from DockerHub.
+type NamespacesResp struct {
+    // Namespaces is a list of namespaces
+    Namespaces []string `json:"namespaces"`
+}
diff --git a/registry/app/remote/adapter/native/adapter.go b/registry/app/remote/adapter/native/adapter.go
new file mode 100644
index 000000000..cc5f2da1a
--- /dev/null
+++ b/registry/app/remote/adapter/native/adapter.go
@@ -0,0 +1,113 @@
+// Source: https://github.com/goharbor/harbor
+
+// Copyright 2016 Project Harbor Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package native
+
+import (
+    "context"
+
+    s "github.com/harness/gitness/app/api/controller/secret"
+    "github.com/harness/gitness/app/store"
+    "github.com/harness/gitness/encrypt"
+    api "github.com/harness/gitness/registry/app/api/openapi/contracts/artifact"
+    "github.com/harness/gitness/registry/app/common/lib/errors"
+    adp "github.com/harness/gitness/registry/app/remote/adapter"
+    "github.com/harness/gitness/registry/app/remote/clients/registry"
+    "github.com/harness/gitness/registry/types"
+
+    "github.com/rs/zerolog/log"
+)
+
+var (
+    _ adp.Adapter          = (*Adapter)(nil)
+    _ adp.ArtifactRegistry = (*Adapter)(nil)
+)
+
+// Adapter implements an adapter for the Docker proxy. It can be used for all
+// registries that implement the registry V2 API.
+type Adapter struct {
+    proxy types.UpstreamProxy
+    registry.Client
+}
+
+// NewAdapter returns an instance of the Adapter.
+func NewAdapter(
+    ctx context.Context,
+    secretStore store.SecretStore,
+    encrypter encrypt.Encrypter,
+    reg types.UpstreamProxy,
+) *Adapter {
+    adapter := &Adapter{
+        proxy: reg,
+    }
+    // Get the password: look up secrets.secret_data using secret_identifier & secret_space_id.
+    password := getPwd(ctx, secretStore, encrypter, reg)
+    username, url := reg.UserName, reg.RepoURL
+    adapter.Client = registry.NewClient(url, username, password, false)
+    return adapter
+}
+
+// getPwd looks up secrets.secret_data using secret_identifier & secret_space_id.
+func getPwd(
+    ctx context.Context,
+    secretStore store.SecretStore,
+    encrypter encrypt.Encrypter,
+    reg types.UpstreamProxy,
+) string {
+    password := ""
+    if api.AuthType(reg.RepoAuthType) == api.AuthTypeUserPassword {
+        secret, err := secretStore.FindByIdentifier(ctx, int64(reg.SecretSpaceID), reg.SecretIdentifier)
+        if err != nil {
+            log.Error().Msgf("failed to find secret: %v", err)
+            return ""
+        }
+        secret, err = s.Dec(encrypter, secret)
+        if err != nil {
+            log.Error().Msgf("could not decrypt secret: %v", err)
+            return ""
+        }
+        password = secret.Data
+    }
+    return password
+}
+
+// HealthCheck checks the health status of a proxy.
+func (a *Adapter) HealthCheck() (string, error) {
+    return "Not implemented", nil
+}
+
+// PingSimple checks whether the proxy is available. It checks the connectivity and certificate (if TLS is enabled)
+// only, regardless of 401/403 errors.
+func (a *Adapter) PingSimple() error { + err := a.Ping() + if err == nil { + return nil + } + if errors.IsErr(err, errors.UnAuthorizedCode) || errors.IsErr(err, errors.ForbiddenCode) { + return nil + } + return err +} + +// DeleteTag isn't supported for docker proxy. +func (a *Adapter) DeleteTag(_, _ string) error { + return errors.New("the tag deletion isn't supported") +} + +// CanBeMount isn't supported for docker proxy. +func (a *Adapter) CanBeMount(_ string) (mount bool, repository string, err error) { + return false, "", nil +} diff --git a/registry/app/remote/clients/registry/auth/authorizer.go b/registry/app/remote/clients/registry/auth/authorizer.go new file mode 100644 index 000000000..1ce256108 --- /dev/null +++ b/registry/app/remote/clients/registry/auth/authorizer.go @@ -0,0 +1,143 @@ +// Source: https://github.com/goharbor/harbor + +// Copyright 2016 Project Harbor Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package auth + +import ( + "context" + "fmt" + "net/http" + "net/url" + "strings" + "sync" + + commonhttp "github.com/harness/gitness/registry/app/common/http" + "github.com/harness/gitness/registry/app/common/http/modifier" + "github.com/harness/gitness/registry/app/common/lib" + "github.com/harness/gitness/registry/app/dist_temp/challenge" + "github.com/harness/gitness/registry/app/remote/clients/registry/auth/basic" + "github.com/harness/gitness/registry/app/remote/clients/registry/auth/bearer" + "github.com/harness/gitness/registry/app/remote/clients/registry/auth/null" +) + +// NewAuthorizer creates an authorizer that can handle different auth schemes. +func NewAuthorizer(username, password string, insecure bool) lib.Authorizer { + return &authorizer{ + username: username, + password: password, + client: &http.Client{ + Transport: commonhttp.GetHTTPTransport(commonhttp.WithInsecure(insecure)), + }, + } +} + +// authorizer authorizes the request with the provided credential. +// It determines the auth scheme of registry automatically and calls +// different underlying authorizers to do the auth work. +type authorizer struct { + sync.Mutex + username string + password string + client *http.Client + url *url.URL // registry URL + authorizer modifier.Modifier // the underlying authorizer +} + +func (a *authorizer) Modify(req *http.Request) error { + // Nil underlying authorizer means this is the first time the authorizer is called + // Try to connect to the registry and determine the auth scheme + if a.authorizer == nil { + // to avoid concurrent issue + a.Lock() + defer a.Unlock() + if err := a.initialize(req.URL); err != nil { + return err + } + } + + // check whether the request targets the registry + // If it doesn't, no modification is needed, so we return nil. + if !a.isTarget(req) { + return nil + } + + // If the request targets the registry, delegate the modification to the underlying authorizer. 
+    return a.authorizer.Modify(req)
+}
+
+func (a *authorizer) initialize(u *url.URL) error {
+    if a.authorizer != nil {
+        return nil
+    }
+    url, err := url.Parse(u.Scheme + "://" + u.Host + "/v2/")
+    if err != nil {
+        return fmt.Errorf("failed to parse URL for scheme %s and host %s: %w", u.Scheme, u.Host, err)
+    }
+    a.url = url
+
+    req, err := http.NewRequestWithContext(context.Background(), http.MethodGet, a.url.String(), nil)
+    if err != nil {
+        return err
+    }
+
+    resp, err := a.client.Do(req)
+    if err != nil {
+        return err
+    }
+    defer resp.Body.Close()
+
+    challenges := challenge.ResponseChallenges(resp)
+    // no challenge means no auth
+    if len(challenges) == 0 {
+        a.authorizer = null.NewAuthorizer()
+        return nil
+    }
+    cm := map[string]challenge.Challenge{}
+    for _, challenge := range challenges {
+        cm[challenge.Scheme] = challenge
+    }
+    if challenge, exist := cm["bearer"]; exist {
+        a.authorizer = bearer.NewAuthorizer(
+            challenge.Parameters["realm"],
+            challenge.Parameters["service"], basic.NewAuthorizer(a.username, a.password),
+            a.client.Transport,
+        )
+        return nil
+    }
+    if _, exist := cm["basic"]; exist {
+        a.authorizer = basic.NewAuthorizer(a.username, a.password)
+        return nil
+    }
+    return fmt.Errorf("unsupported auth scheme: %v", challenges)
+}
+
+// isTarget checks whether the request targets the registry.
+// If not, the request shouldn't be handled by the authorizer, e.g., requests sent to backend storage (S3, etc.).
+func (a *authorizer) isTarget(req *http.Request) bool {
+    // Check if the path contains the versioned API endpoint (e.g., "/v2/")
+    const versionedPath = "/v2/"
+    if !strings.Contains(req.URL.Path, versionedPath) {
+        return false
+    }
+
+    // Ensure that the request's host, scheme, and versioned path match the authorizer's URL.
+    if req.URL.Host != a.url.Host || req.URL.Scheme != a.url.Scheme ||
+        !strings.HasPrefix(req.URL.Path, a.url.Path) {
+        return false
+    }
+
+    return true
+}
diff --git a/registry/app/remote/clients/registry/auth/basic/authorizer.go b/registry/app/remote/clients/registry/auth/basic/authorizer.go
new file mode 100644
index 000000000..b2b51dc27
--- /dev/null
+++ b/registry/app/remote/clients/registry/auth/basic/authorizer.go
@@ -0,0 +1,43 @@
+// Source: https://github.com/goharbor/harbor
+
+// Copyright 2016 Project Harbor Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package basic
+
+import (
+    "net/http"
+
+    "github.com/harness/gitness/registry/app/common/lib"
+)
+
+// NewAuthorizer returns a basic authorizer.
+func NewAuthorizer(username, password string) lib.Authorizer { + return &authorizer{ + username: username, + password: password, + } +} + +type authorizer struct { + username string + password string +} + +func (a *authorizer) Modify(req *http.Request) error { + if len(a.username) > 0 { + req.SetBasicAuth(a.username, a.password) + } + return nil +} diff --git a/registry/app/remote/clients/registry/auth/basic/authorizer_test.go b/registry/app/remote/clients/registry/auth/basic/authorizer_test.go new file mode 100644 index 000000000..c5db2ef63 --- /dev/null +++ b/registry/app/remote/clients/registry/auth/basic/authorizer_test.go @@ -0,0 +1,36 @@ +// Source: https://github.com/goharbor/harbor + +// Copyright 2016 Project Harbor Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package basic + +import ( + "net/http" + "testing" + + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" +) + +func TestModify(t *testing.T) { + authorizer := NewAuthorizer("u", "p") + req, _ := http.NewRequest(http.MethodGet, "", nil) + err := authorizer.Modify(req) + require.Nil(t, err) + u, p, ok := req.BasicAuth() + require.True(t, ok) + assert.Equal(t, "u", u) + assert.Equal(t, "p", p) +} diff --git a/registry/app/remote/clients/registry/auth/bearer/authorizer.go b/registry/app/remote/clients/registry/auth/bearer/authorizer.go new file mode 100644 index 000000000..bb8d126e6 --- /dev/null +++ b/registry/app/remote/clients/registry/auth/bearer/authorizer.go @@ -0,0 +1,163 @@ +// Source: https://github.com/goharbor/harbor + +// Copyright 2016 Project Harbor Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package bearer + +import ( + "context" + "encoding/json" + "fmt" + "io" + "net/http" + "net/url" + + "github.com/harness/gitness/registry/app/common/lib" + "github.com/harness/gitness/registry/app/common/lib/errors" +) + +const ( + cacheCapacity = 100 + cacheLatency = 10 // second +) + +// NewAuthorizer return a bearer token authorizer +// The parameter "a" is an authorizer used to fetch the token. 
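+// In this package it is typically constructed from a "Bearer" WWW-Authenticate
+// challenge (see auth/authorizer.go), along the lines of:
+//
+//    bearer.NewAuthorizer(params["realm"], params["service"],
+//        basic.NewAuthorizer(username, password), transport)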
+func NewAuthorizer(realm, service string, a lib.Authorizer, transport http.RoundTripper) lib.Authorizer { + authorizer := &authorizer{ + realm: realm, + service: service, + authorizer: a, + cache: newCache(cacheCapacity, cacheLatency), + } + + authorizer.client = &http.Client{Transport: transport} + return authorizer +} + +type authorizer struct { + realm string + service string + authorizer lib.Authorizer + cache *cache + client *http.Client +} + +func (a *authorizer) Modify(req *http.Request) error { + // parse scopes from request + scopes := parseScopes(req) + + // get token + token, err := a.getToken(scopes) + if err != nil { + return err + } + + // set authorization header + if token != nil && len(token.Token) > 0 { + req.Header.Add("Authorization", fmt.Sprintf("Bearer %s", token.Token)) + } + return nil +} + +func (a *authorizer) getToken(scopes []*scope) (*token, error) { + // get token from cache first + token := a.cache.get(scopes) + if token != nil { + return token, nil + } + + // get no token from cache, fetch it from the token service + token, err := a.fetchToken(scopes) + if err != nil { + return nil, err + } + + // set the token into the cache + a.cache.set(scopes, token) + return token, nil +} + +type token struct { + Token string `json:"token"` + AccessToken string `json:"access_token"` // the token returned by azure container registry is called "access_token" + ExpiresIn int `json:"expires_in"` + IssuedAt string `json:"issued_at"` +} + +func (a *authorizer) fetchToken(scopes []*scope) (*token, error) { + url, err := url.Parse(a.realm) + if err != nil { + return nil, err + } + query := url.Query() + query.Add("service", a.service) + for _, scope := range scopes { + query.Add("scope", scope.String()) + } + url.RawQuery = query.Encode() + + req, err := http.NewRequestWithContext(context.TODO(), http.MethodGet, url.String(), nil) + if err != nil { + return nil, err + } + if a.authorizer != nil { + if err = a.authorizer.Modify(req); err != nil { + return nil, err + } + } + + resp, err := a.client.Do(req) + if err != nil { + return nil, err + } + defer resp.Body.Close() + body, err := io.ReadAll(resp.Body) + if err != nil { + return nil, err + } + token := &token{} + switch resp.StatusCode { + case http.StatusOK: + return getToken(body, token) + case http.StatusUnauthorized: + return nil, fmt.Errorf("request with body :%s : %s", string(body), errors.UnAuthorizedCode) + case http.StatusForbidden: + return nil, fmt.Errorf("request with body :%s : %s", string(body), errors.ForbiddenCode) + default: + return nil, fmt.Errorf( + "failed to fetch token for request with body %s, status code %d", + string(body), + resp.StatusCode, + ) + } +} + +// getToken unmarshals the provided JSON-encoded body into the given token struct. +// If the "Token" field is empty but the "AccessToken" field is populated, it assigns "AccessToken" to "Token". +// It returns the updated token struct and any error encountered during unmarshalling. 
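+// For example, Azure Container Registry populates only "access_token", which
+// is normalized into the "Token" field (illustrative):
+//
+//    t, _ := getToken([]byte(`{"access_token":"abc"}`), &token{})
+//    // t.Token == "abc"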
+func getToken(body []byte, t *token) (*token, error) { + // Unmarshal the JSON body into the token struct + if err := json.Unmarshal(body, t); err != nil { + return nil, fmt.Errorf("failed to unmarshal token: %w", err) + } + + // If Token is empty and AccessToken is provided, assign AccessToken to Token + if t.Token == "" && t.AccessToken != "" { + t.Token = t.AccessToken + } + + return t, nil +} diff --git a/registry/app/remote/clients/registry/auth/bearer/cache.go b/registry/app/remote/clients/registry/auth/bearer/cache.go new file mode 100644 index 000000000..7c049a79b --- /dev/null +++ b/registry/app/remote/clients/registry/auth/bearer/cache.go @@ -0,0 +1,119 @@ +// Source: https://github.com/goharbor/harbor + +// Copyright 2016 Project Harbor Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package bearer + +import ( + "fmt" + "strings" + "sync" + "time" + + "github.com/rs/zerolog/log" +) + +// newCache initializes a new cache with the specified capacity and latency. +// The latency is the delay applied to cache operations (if applicable). +func newCache(capacity int, latency int) *cache { + return &cache{ + latency: latency, + capacity: capacity, + cache: map[string]*token{}, + } +} + +type cache struct { + sync.RWMutex + latency int // second, the network latency in case that when the + // token is checked it doesn't expire but it does when used. + capacity int // the capacity of the cache map. + cache map[string]*token +} + +func (c *cache) get(scopes []*scope) *token { + c.RLock() + defer c.RUnlock() + token := c.cache[c.key(scopes)] + if token == nil { + return nil + } + expired, _ := c.expired(token) + if expired { + token = nil + } + return token +} + +func (c *cache) set(scopes []*scope, token *token) { + c.Lock() + defer c.Unlock() + // exceed the capacity, empty some elements: all expired token will be removed, + // if no expired token, move the earliest one. + if len(c.cache) >= c.capacity { + var candidates []string + var earliestKey string + var earliestExpireTime time.Time + for key, value := range c.cache { + expired, expireAt := c.expired(value) + // expired. + if expired { + candidates = append(candidates, key) + continue + } + // doesn't expired. + if len(earliestKey) == 0 || expireAt.Before(earliestExpireTime) { + earliestKey = key + earliestExpireTime = expireAt + continue + } + } + if len(candidates) == 0 { + candidates = append(candidates, earliestKey) + } + for _, candidate := range candidates { + delete(c.cache, candidate) + } + } + c.cache[c.key(scopes)] = token +} + +func (c *cache) key(scopes []*scope) string { + var strs []string + for _, scope := range scopes { + strs = append(strs, scope.String()) + } + return strings.Join(strs, "#") +} + +// return whether the token is expired or not and the expired time. +func (c *cache) expired(token *token) (bool, time.Time) { + // check time whether empty. 
+ if len(token.IssuedAt) == 0 { + log.Warn().Msg("token issued time is empty, return expired to refresh token") + return true, time.Time{} + } + + issueAt, err := time.Parse(time.RFC3339, token.IssuedAt) + if err != nil { + log.Error(). + Stack(). + Err(err). + Msg(fmt.Sprintf("failed to parse the issued at time of token %s: %v", token.IssuedAt, err)) + return true, time.Time{} + } + expireAt := issueAt.Add(time.Duration(token.ExpiresIn-c.latency) * time.Second) + return expireAt.Before(time.Now()), expireAt +} diff --git a/registry/app/remote/clients/registry/auth/bearer/scope.go b/registry/app/remote/clients/registry/auth/bearer/scope.go new file mode 100644 index 000000000..24070c067 --- /dev/null +++ b/registry/app/remote/clients/registry/auth/bearer/scope.go @@ -0,0 +1,123 @@ +// Source: https://github.com/goharbor/harbor + +// Copyright 2016 Project Harbor Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package bearer + +import ( + "fmt" + "net/http" + "regexp" + "strings" + + "github.com/distribution/reference" +) + +const ( + scopeTypeRegistry = "registry" + scopeTypeRepository = "repository" + scopeActionPull = "pull" + scopeActionPush = "push" + scopeActionAll = "*" +) + +const v2 = "/v2/(" + +var ( + catalog = regexp.MustCompile("/v2/_catalog$") + tag = regexp.MustCompile(v2 + reference.NameRegexp.String() + ")/tags/list") + manifest = regexp.MustCompile( + v2 + + reference.NameRegexp.String() + + ")/manifests/(" + + reference.TagRegexp.String() + + "|" + reference.DigestRegexp.String() + ")", + ) + blob = regexp.MustCompile( + v2 + reference.NameRegexp.String() + ")/blobs/" + reference.DigestRegexp.String(), + ) + blobUpload = regexp.MustCompile(v2 + reference.NameRegexp.String() + ")/blobs/uploads") +) + +type scope struct { + Type string + Name string + Actions []string +} + +func (s *scope) String() string { + return fmt.Sprintf("%s:%s:%s", s.Type, s.Name, strings.Join(s.Actions, ",")) +} + +func parseScopes(req *http.Request) []*scope { + path := strings.TrimRight(req.URL.Path, "/") + var scopes []*scope + repository := "" + // manifest + if subs := manifest.FindStringSubmatch(path); len(subs) >= 2 { + // manifest + repository = subs[1] + } else if subs1 := blob.FindStringSubmatch(path); len(subs1) >= 2 { + // blob + repository = subs1[1] + } else if subs2 := blobUpload.FindStringSubmatch(path); len(subs2) >= 2 { + // blob upload + repository = subs2[1] + // blob mount + from := req.URL.Query().Get("from") + if len(from) > 0 { + scopes = append( + scopes, &scope{ + Type: scopeTypeRepository, + Name: from, + Actions: []string{scopeActionPull}, + }, + ) + } + } else if subs3 := tag.FindStringSubmatch(path); len(subs3) >= 2 { + // tag + repository = subs3[1] + } + if len(repository) > 0 { + scp := &scope{ + Type: scopeTypeRepository, + Name: repository, + } + switch req.Method { + case http.MethodGet, http.MethodHead: + scp.Actions = []string{scopeActionPull} + case http.MethodPost, http.MethodPut, http.MethodPatch: + scp.Actions = []string{scopeActionPull, 
scopeActionPush} + case http.MethodDelete: + scp.Actions = []string{scopeActionAll} + } + scopes = append(scopes, scp) + return scopes + } + + // catalog + if catalog.MatchString(path) { + return []*scope{ + { + Type: scopeTypeRegistry, + Name: "catalog", + Actions: []string{scopeActionAll}, + }, + } + } + + // base or no match, return nil + return nil +} diff --git a/registry/app/remote/clients/registry/auth/null/authorizer.go b/registry/app/remote/clients/registry/auth/null/authorizer.go new file mode 100644 index 000000000..d41578266 --- /dev/null +++ b/registry/app/remote/clients/registry/auth/null/authorizer.go @@ -0,0 +1,35 @@ +// Source: https://github.com/goharbor/harbor + +// Copyright 2016 Project Harbor Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package null + +import ( + "net/http" + + "github.com/harness/gitness/registry/app/common/lib" +) + +// NewAuthorizer returns a null authorizer. +func NewAuthorizer() lib.Authorizer { + return &authorizer{} +} + +type authorizer struct{} + +func (a *authorizer) Modify(_ *http.Request) error { + // do nothing + return nil +} diff --git a/registry/app/remote/clients/registry/client.go b/registry/app/remote/clients/registry/client.go new file mode 100644 index 000000000..fae0eec51 --- /dev/null +++ b/registry/app/remote/clients/registry/client.go @@ -0,0 +1,825 @@ +// Source: https://github.com/goharbor/harbor + +// Copyright 2016 Project Harbor Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package registry + +import ( + "bytes" + "context" + "encoding/json" + "fmt" + "io" + "net/http" + "net/url" + "os" + "strconv" + "strings" + "time" + + commonhttp "github.com/harness/gitness/registry/app/common/http" + "github.com/harness/gitness/registry/app/common/lib" + "github.com/harness/gitness/registry/app/common/lib/errors" + "github.com/harness/gitness/registry/app/manifest" + "github.com/harness/gitness/registry/app/manifest/manifestlist" + "github.com/harness/gitness/registry/app/manifest/schema2" + "github.com/harness/gitness/registry/app/remote/clients/registry/auth" + "github.com/harness/gitness/registry/app/remote/clients/registry/interceptor" + + "github.com/opencontainers/go-digest" + v1 "github.com/opencontainers/image-spec/specs-go/v1" + "github.com/rs/zerolog/log" + + _ "github.com/harness/gitness/registry/app/manifest/ocischema" // register oci manifest unmarshal function +) + +var ( + // Cli is the global registry client instance, it targets to the backend docker registry. 
+    Cli = func() Client {
+        url := "myurl"
+        username, password := "myusername", "mypassword"
+        // url, _ := config.RegistryURL()
+        // username, password := config.RegistryCredential()
+        return NewClient(url, username, password, false)
+    }()
+
+    accepts = []string{
+        v1.MediaTypeImageIndex,
+        manifestlist.MediaTypeManifestList,
+        v1.MediaTypeImageManifest,
+        schema2.MediaTypeManifest,
+        MediaTypeSignedManifest,
+        MediaTypeManifest,
+    }
+)
+
+// const definition.
+const (
+    UserAgent = "harbor-registry-client"
+    // DefaultHTTPClientTimeout is the default timeout for the registry http client.
+    DefaultHTTPClientTimeout = 30 * time.Minute
+    // MediaTypeManifest specifies the mediaType for the current version. Note
+    // that for schema version 1, the media is optionally "application/json".
+    MediaTypeManifest = "application/vnd.docker.distribution.manifest.v1+json"
+    // MediaTypeSignedManifest specifies the mediatype for the current SignedManifest version.
+    MediaTypeSignedManifest = "application/vnd.docker.distribution.manifest.v1+prettyjws"
+    // MediaTypeManifestLayer specifies the media type for manifest layers.
+    MediaTypeManifestLayer = "application/vnd.docker.container.image.rootfs.diff+x-gtar"
+)
+
+var (
+    // registryHTTPClientTimeout is the timeout for the registry http client.
+    registryHTTPClientTimeout time.Duration
+)
+
+func init() {
+    registryHTTPClientTimeout = DefaultHTTPClientTimeout
+    // override it if set via the environment variable, in minutes
+    if env := os.Getenv("GITNESS_REGISTRY_HTTP_CLIENT_TIMEOUT"); len(env) > 0 {
+        timeout, err := strconv.ParseInt(env, 10, 64)
+        if err != nil {
+            log.Error().
+                Stack().
+                Err(err).
+                Msg(
+                    fmt.Sprintf(
+                        "Failed to parse GITNESS_REGISTRY_HTTP_CLIENT_TIMEOUT: %v, use default value: %v",
+                        err, DefaultHTTPClientTimeout,
+                    ),
+                )
+        } else if timeout > 0 {
+            registryHTTPClientTimeout = time.Duration(timeout) * time.Minute
+        }
+    }
+}
+
+// Client defines the methods that a registry client should implement.
+type Client interface {
+    // Ping the base API endpoint "/v2/"
+    Ping() (err error)
+    // Catalog lists the repositories
+    Catalog() (repositories []string, err error)
+    // ListTags lists the tags under the specified repository
+    ListTags(repository string) (tags []string, err error)
+    // ManifestExist checks the existence of the manifest
+    ManifestExist(repository, reference string) (exist bool, desc *manifest.Descriptor, err error)
+    // PullManifest pulls the specified manifest
+    PullManifest(
+        repository, reference string,
+        acceptedMediaTypes ...string,
+    ) (manifest manifest.Manifest, digest string, err error)
+    // PushManifest pushes the specified manifest
+    PushManifest(repository, reference, mediaType string, payload []byte) (digest string, err error)
+    // DeleteManifest deletes the specified manifest. The "reference" can be a "tag" or a "digest"
+    DeleteManifest(repository, reference string) (err error)
+    // BlobExist checks the existence of the specified blob
+    BlobExist(repository, digest string) (exist bool, err error)
+    // PullBlob pulls the specified blob. The caller must close the returned "blob"
+    PullBlob(repository, digest string) (size int64, blob io.ReadCloser, err error)
+    // PullBlobChunk pulls the specified blob in chunks
+    PullBlobChunk(repository, digest string, blobSize, start, end int64) (size int64, blob io.ReadCloser, err error)
+    // PushBlob pushes the specified blob
+    PushBlob(repository, digest string, size int64, blob io.Reader) error
+    // PushBlobChunk pushes the specified blob in chunks
+    PushBlobChunk(
+        repository, digest string,
+        blobSize int64,
+        chunk io.Reader,
+        start, end int64,
+        location string,
+    ) (nextUploadLocation string, endRange int64, err error)
+    // MountBlob mounts the blob from the source repository
+    MountBlob(srcRepository, digest, dstRepository string) (err error)
+    // DeleteBlob deletes the specified blob
+    DeleteBlob(repository, digest string) (err error)
+    // Copy copies the artifact from the source repository to the destination. The "override"
+    // flag specifies whether the destination artifact will be overridden if its
+    // name is the same as the source's but the digest isn't
+    Copy(srcRepository, srcReference, dstRepository, dstReference string, override bool) (err error)
+    // Do sends generic HTTP requests to the target registry service
+    Do(req *http.Request) (*http.Response, error)
+}
+
+// NewClient creates a registry client with the default authorizer, which determines the auth scheme
+// of the registry automatically and calls the corresponding underlying authorizers (basic/bearer) to
+// do the auth work. If a customized authorizer is needed, use "NewClientWithAuthorizer" instead.
+func NewClient(url, username, password string, insecure bool, interceptors ...interceptor.Interceptor) Client {
+    authorizer := auth.NewAuthorizer(username, password, insecure)
+    return NewClientWithAuthorizer(url, authorizer, insecure, interceptors...)
+}
+
+// NewClientWithAuthorizer creates a registry client with the provided authorizer.
+func NewClientWithAuthorizer(
+    url string,
+    authorizer lib.Authorizer,
+    insecure bool,
+    interceptors ...interceptor.Interceptor,
+) Client {
+    return &client{
+        url:          url,
+        authorizer:   authorizer,
+        interceptors: interceptors,
+        client: &http.Client{
+            Transport: commonhttp.GetHTTPTransport(commonhttp.WithInsecure(insecure)),
+            Timeout:   registryHTTPClientTimeout,
+        },
+    }
+}
+
+type client struct {
+    url          string
+    authorizer   lib.Authorizer
+    interceptors []interceptor.Interceptor
+    client       *http.Client
+}
+
+func (c *client) Ping() error {
+    req, err := http.NewRequestWithContext(context.TODO(), http.MethodGet, buildPingURL(c.url), nil)
+    if err != nil {
+        return err
+    }
+    resp, err := c.do(req)
+    if err != nil {
+        return err
+    }
+    defer resp.Body.Close()
+    return nil
+}
+
+func (c *client) Catalog() ([]string, error) {
+    var repositories []string
+    url := buildCatalogURL(c.url)
+    for {
+        repos, next, err := c.catalog(url)
+        if err != nil {
+            return nil, err
+        }
+        repositories = append(repositories, repos...)
+ + url = next + // no next page, end the loop + if len(url) == 0 { + break + } + // relative URL + if !strings.Contains(url, "://") { + url = c.url + url + } + } + return repositories, nil +} + +func (c *client) catalog(url string) ([]string, string, error) { + req, err := http.NewRequestWithContext(context.TODO(), http.MethodGet, url, nil) + if err != nil { + return nil, "", err + } + resp, err := c.do(req) + if err != nil { + return nil, "", err + } + defer resp.Body.Close() + + body, err := io.ReadAll(resp.Body) + if err != nil { + return nil, "", err + } + repositories := struct { + Repositories []string `json:"repositories"` + }{} + if err := json.Unmarshal(body, &repositories); err != nil { + return nil, "", err + } + return repositories.Repositories, next(resp.Header.Get("Link")), nil +} + +func (c *client) ListTags(repository string) ([]string, error) { + var tags []string + url := buildTagListURL(c.url, repository) + for { + tgs, next, err := c.listTags(url) + if err != nil { + return nil, err + } + tags = append(tags, tgs...) + + url = next + // no next page, end the loop + if len(url) == 0 { + break + } + // relative URL + if !strings.Contains(url, "://") { + url = c.url + url + } + } + return tags, nil +} + +func (c *client) listTags(url string) ([]string, string, error) { + req, err := http.NewRequestWithContext(context.TODO(), http.MethodGet, url, nil) + if err != nil { + return nil, "", err + } + resp, err := c.do(req) + if err != nil { + return nil, "", err + } + defer resp.Body.Close() + + body, err := io.ReadAll(resp.Body) + if err != nil { + return nil, "", err + } + tgs := struct { + Tags []string `json:"tags"` + }{} + if err := json.Unmarshal(body, &tgs); err != nil { + return nil, "", err + } + return tgs.Tags, next(resp.Header.Get("Link")), nil +} + +func (c *client) ManifestExist(repository, reference string) (bool, *manifest.Descriptor, error) { + req, err := http.NewRequestWithContext( + context.TODO(), + http.MethodHead, buildManifestURL(c.url, repository, reference), nil, + ) + if err != nil { + return false, nil, err + } + for _, mediaType := range accepts { + req.Header.Add("Accept", mediaType) + } + resp, err := c.do(req) + if err != nil { + if errors.IsErr(err, errors.NotFoundCode) { + return false, nil, nil + } + return false, nil, err + } + defer resp.Body.Close() + dig := resp.Header.Get("Docker-Content-Digest") + contentType := resp.Header.Get("Content-Type") + contentLen := resp.Header.Get("Content-Length") + length, _ := strconv.Atoi(contentLen) + return true, &manifest.Descriptor{Digest: digest.Digest(dig), MediaType: contentType, Size: int64(length)}, nil +} + +func (c *client) PullManifest(repository, reference string, acceptedMediaTypes ...string) ( + manifest.Manifest, string, error, +) { + req, err := http.NewRequestWithContext( + context.TODO(), + http.MethodGet, buildManifestURL( + c.url, repository, + reference, + ), nil, + ) + if err != nil { + return nil, "", err + } + if len(acceptedMediaTypes) == 0 { + acceptedMediaTypes = accepts + } + for _, mediaType := range acceptedMediaTypes { + req.Header.Add("Accept", mediaType) + } + resp, err := c.do(req) + if err != nil { + return nil, "", err + } + defer resp.Body.Close() + payload, err := io.ReadAll(resp.Body) + if err != nil { + return nil, "", err + } + mediaType := resp.Header.Get("Content-Type") + manifest, _, err := manifest.UnmarshalManifest(mediaType, payload) + if err != nil { + return nil, "", err + } + digest := resp.Header.Get("Docker-Content-Digest") + return manifest, digest, nil 
+}
+
+func (c *client) PushManifest(repository, reference, mediaType string, payload []byte) (string, error) {
+    req, err := http.NewRequestWithContext(
+        context.TODO(), http.MethodPut, buildManifestURL(c.url, repository, reference),
+        bytes.NewReader(payload),
+    )
+    if err != nil {
+        return "", err
+    }
+    req.Header.Set("Content-Type", mediaType)
+    resp, err := c.do(req)
+    if err != nil {
+        return "", err
+    }
+    defer resp.Body.Close()
+    return resp.Header.Get("Docker-Content-Digest"), nil
+}
+
+func (c *client) DeleteManifest(repository, reference string) error {
+    _, err := digest.Parse(reference)
+    if err != nil {
+        // the reference is a tag, get the digest first
+        exist, desc, err := c.ManifestExist(repository, reference)
+        if err != nil {
+            return err
+        }
+        if !exist {
+            return errors.New(nil).WithCode(errors.NotFoundCode).
+                WithMessage("%s:%s not found", repository, reference)
+        }
+        reference = string(desc.Digest)
+    }
+    req, err := http.NewRequestWithContext(
+        context.TODO(), http.MethodDelete,
+        buildManifestURL(c.url, repository, reference), nil,
+    )
+    if err != nil {
+        return err
+    }
+    resp, err := c.do(req)
+    if err != nil {
+        return err
+    }
+    defer resp.Body.Close()
+    return nil
+}
+
+func (c *client) BlobExist(repository, digest string) (bool, error) {
+    req, err := http.NewRequestWithContext(
+        context.TODO(), http.MethodHead, buildBlobURL(c.url, repository, digest), nil,
+    )
+    if err != nil {
+        return false, err
+    }
+    resp, err := c.do(req)
+    if err != nil {
+        if errors.IsErr(err, errors.NotFoundCode) {
+            return false, nil
+        }
+        return false, err
+    }
+    defer resp.Body.Close()
+    return true, nil
+}
+
+func (c *client) PullBlob(repository, digest string) (int64, io.ReadCloser, error) {
+    req, err := http.NewRequestWithContext(context.TODO(), http.MethodGet, buildBlobURL(c.url, repository, digest), nil)
+    if err != nil {
+        return 0, nil, err
+    }
+
+    req.Header.Add("Accept-Encoding", "identity")
+    resp, err := c.do(req)
+    if err != nil {
+        return 0, nil, err
+    }
+
+    var size int64
+    n := resp.Header.Get("Content-Length")
+    // a missing content-length is acceptable; the size can be taken from the manifest
+    if len(n) > 0 {
+        size, err = strconv.ParseInt(n, 10, 64)
+        if err != nil {
+            defer resp.Body.Close()
+            return 0, nil, err
+        }
+    }
+
+    return size, resp.Body, nil
+}
+
+// PullBlobChunk pulls the specified blob in chunks; refer to
+// https://github.com/opencontainers/distribution-spec/blob/main/spec.md#pull
+// for more details.
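+// The chunk is requested with a standard HTTP Range header using inclusive
+// byte offsets; e.g. the first 1 MiB is requested as "Range: bytes=0-1048575".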
+func (c *client) PullBlobChunk(repository, digest string, _ int64, start, end int64) (int64, io.ReadCloser, error) {
+    req, err := http.NewRequestWithContext(context.TODO(), http.MethodGet, buildBlobURL(c.url, repository, digest), nil)
+    if err != nil {
+        return 0, nil, err
+    }
+
+    req.Header.Add("Accept-Encoding", "identity")
+    req.Header.Add("Range", fmt.Sprintf("bytes=%d-%d", start, end))
+    resp, err := c.do(req)
+    if err != nil {
+        return 0, nil, err
+    }
+
+    var size int64
+    n := resp.Header.Get("Content-Length")
+    // a missing content-length is acceptable; the size can be taken from the manifest
+    if len(n) > 0 {
+        size, err = strconv.ParseInt(n, 10, 64)
+        if err != nil {
+            defer resp.Body.Close()
+            return 0, nil, err
+        }
+    }
+
+    return size, resp.Body, nil
+}
+
+func (c *client) PushBlob(repository, digest string, size int64, blob io.Reader) error {
+    location, err := c.initiateBlobUpload(repository)
+    if err != nil {
+        return err
+    }
+    return c.monolithicBlobUpload(location, digest, size, blob)
+}
+
+// PushBlobChunk pushes the specified blob in chunks;
+// refer to https://github.com/opencontainers/distribution-spec/blob/main/spec.md#push
+// for more details.
+func (c *client) PushBlobChunk(
+    repository, digest string,
+    blobSize int64,
+    chunk io.Reader,
+    start, end int64,
+    location string,
+) (string, int64, error) {
+    var err error
+    // the first chunk needs to initialize the blob upload location
+    if start == 0 {
+        location, err = c.initiateBlobUpload(repository)
+        if err != nil {
+            return location, end, err
+        }
+    }
+
+    // the range is from 0 to (blobSize-1), so (end == blobSize-1) means it is the last chunk
+    lastChunk := end == blobSize-1
+    url, err := buildChunkBlobUploadURL(c.url, location, digest, lastChunk)
+    if err != nil {
+        return location, end, err
+    }
+
+    // use PUT instead of PATCH for the last chunk, which saves a final request
+    method := http.MethodPatch
+    if lastChunk {
+        method = http.MethodPut
+    }
+    req, err := http.NewRequestWithContext(context.TODO(), method, url, chunk)
+    if err != nil {
+        return location, end, err
+    }
+
+    req.Header.Set("Content-Length", fmt.Sprintf("%d", end-start+1))
+    req.Header.Set("Content-Range", fmt.Sprintf("%d-%d", start, end))
+    resp, err := c.do(req)
+    if err != nil {
+        // if pushing the chunk fails, query the upload progress for the new location and end range.
+ newLocation, newEnd, err1 := c.getUploadStatus(location) + if err1 == nil { + return newLocation, newEnd, err + } + // end should return start-1 to re-push this chunk + return location, start - 1, fmt.Errorf("failed to get upload status: %w", err1) + } + + defer resp.Body.Close() + // return the location for next chunk upload + return resp.Header.Get("Location"), end, nil +} + +func (c *client) getUploadStatus(location string) (string, int64, error) { + req, err := http.NewRequestWithContext(context.TODO(), http.MethodGet, location, nil) + if err != nil { + return location, -1, err + } + + resp, err := c.do(req) + if err != nil { + return location, -1, err + } + + defer resp.Body.Close() + + _, end, err := parseContentRange(resp.Header.Get("Range")) + if err != nil { + return location, -1, err + } + + return resp.Header.Get("Location"), end, nil +} + +func parseContentRange(cr string) (int64, int64, error) { + ranges := strings.Split(cr, "-") + if len(ranges) != 2 { + return -1, -1, fmt.Errorf("invalid content range format, %s", cr) + } + start, err := strconv.ParseInt(ranges[0], 10, 64) + if err != nil { + return -1, -1, err + } + end, err := strconv.ParseInt(ranges[1], 10, 64) + if err != nil { + return -1, -1, err + } + + return start, end, nil +} + +func (c *client) initiateBlobUpload(repository string) (string, error) { + req, err := http.NewRequestWithContext( + context.TODO(), http.MethodPost, + buildInitiateBlobUploadURL(c.url, repository), nil, + ) + if err != nil { + return "", err + } + req.Header.Set("Content-Length", "0") + resp, err := c.do(req) + if err != nil { + return "", err + } + defer resp.Body.Close() + return resp.Header.Get("Location"), nil +} + +func (c *client) monolithicBlobUpload(location, digest string, size int64, data io.Reader) error { + url, err := buildMonolithicBlobUploadURL(c.url, location, digest) + if err != nil { + return err + } + req, err := http.NewRequestWithContext(context.TODO(), http.MethodPut, url, data) + if err != nil { + return err + } + req.ContentLength = size + resp, err := c.do(req) + if err != nil { + return err + } + defer resp.Body.Close() + return nil +} + +func (c *client) MountBlob(srcRepository, digest, dstRepository string) error { + req, err := http.NewRequestWithContext( + context.TODO(), http.MethodPost, + buildMountBlobURL(c.url, dstRepository, digest, srcRepository), nil, + ) + if err != nil { + return err + } + req.Header.Set("Content-Length", "0") + resp, err := c.do(req) + if err != nil { + return err + } + defer resp.Body.Close() + return nil +} + +func (c *client) DeleteBlob(repository, digest string) error { + req, err := http.NewRequestWithContext( + context.TODO(), http.MethodDelete, buildBlobURL(c.url, repository, digest), nil, + ) + if err != nil { + return err + } + resp, err := c.do(req) + if err != nil { + return err + } + defer resp.Body.Close() + return nil +} + +func (c *client) Copy(srcRepo, srcRef, dstRepo, dstRef string, override bool) error { + // pull the manifest from the source repository + manifest, srcDgt, err := c.PullManifest(srcRepo, srcRef) + if err != nil { + return err + } + + // check the existence of the artifact on the destination repository + blobExist, desc, err := c.ManifestExist(dstRepo, dstRef) + if err != nil { + return err + } + if blobExist { + // the same artifact already exists + if desc != nil && srcDgt == string(desc.Digest) { + return nil + } + // the same name artifact exists, but not allowed to override + if !override { + return 
errors.New(nil).WithCode(errors.PreconditionCode). + WithMessage("the same name but different digest artifact exists, but the override is set to false") + } + } + + for _, descriptor := range manifest.References() { + digest := descriptor.Digest.String() + switch descriptor.MediaType { + // skip foreign layer + case schema2.MediaTypeForeignLayer: + continue + // manifest or index + case v1.MediaTypeImageIndex, manifestlist.MediaTypeManifestList, + v1.MediaTypeImageManifest, schema2.MediaTypeManifest, + MediaTypeSignedManifest, MediaTypeManifest: + if err = c.Copy(srcRepo, digest, dstRepo, digest, false); err != nil { + return err + } + // common layer + default: + blobExist, err = c.BlobExist(dstRepo, digest) + if err != nil { + return err + } + // the layer already exist, skip + if blobExist { + continue + } + // when the copy happens inside the same registry, use mount + if err = c.MountBlob(srcRepo, digest, dstRepo); err != nil { + return err + } + } + } + + mediaType, payload, err := manifest.Payload() + if err != nil { + return err + } + // push manifest to the destination repository + if _, err = c.PushManifest(dstRepo, dstRef, mediaType, payload); err != nil { + return err + } + + return nil +} + +func (c *client) Do(req *http.Request) (*http.Response, error) { + return c.do(req) +} + +func (c *client) do(req *http.Request) (*http.Response, error) { + for _, interceptor := range c.interceptors { + if err := interceptor.Intercept(req); err != nil { + return nil, err + } + } + if c.authorizer != nil { + if err := c.authorizer.Modify(req); err != nil { + return nil, err + } + } + req.Header.Set("User-Agent", UserAgent) + log.Info().Msgf("[Remote Call]: Request: %s %s", req.Method, req.URL.String()) + resp, err := c.client.Do(req) + if err != nil { + return nil, err + } + if resp.StatusCode < 200 || resp.StatusCode > 299 { + defer resp.Body.Close() + body, err := io.ReadAll(resp.Body) + if err != nil { + return nil, err + } + message := fmt.Sprintf("http status code: %d, body: %s", resp.StatusCode, string(body)) + code := errors.GeneralCode + switch resp.StatusCode { + case http.StatusUnauthorized: + code = errors.UnAuthorizedCode + case http.StatusForbidden: + code = errors.ForbiddenCode + case http.StatusNotFound: + code = errors.NotFoundCode + case http.StatusTooManyRequests: + code = errors.RateLimitCode + } + return nil, errors.New(nil).WithCode(code). + WithMessage(message) + } + return resp, nil +} + +// parse the next page link from the link header. 
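+// For example, a registry may return a header such as (illustrative value only;
+// see RFC 5988 web linking):
+//
+//	Link: </v2/_catalog?last=busybox&n=100>; rel="next"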
+func next(link string) string { + links := lib.ParseLinks(link) + for _, lk := range links { + if lk.Rel == "next" { + return lk.URL + } + } + return "" +} + +func buildPingURL(endpoint string) string { + return fmt.Sprintf("%s/v2/", endpoint) +} + +func buildCatalogURL(endpoint string) string { + return fmt.Sprintf("%s/v2/_catalog?n=1000", endpoint) +} + +func buildTagListURL(endpoint, repository string) string { + return fmt.Sprintf("%s/v2/%s/tags/list", endpoint, repository) +} + +func buildManifestURL(endpoint, repository, reference string) string { + return fmt.Sprintf("%s/v2/%s/manifests/%s", endpoint, repository, reference) +} + +func buildBlobURL(endpoint, repository, reference string) string { + return fmt.Sprintf("%s/v2/%s/blobs/%s", endpoint, repository, reference) +} + +func buildMountBlobURL(endpoint, repository, digest, from string) string { + return fmt.Sprintf("%s/v2/%s/blobs/uploads/?mount=%s&from=%s", endpoint, repository, digest, from) +} + +func buildInitiateBlobUploadURL(endpoint, repository string) string { + return fmt.Sprintf("%s/v2/%s/blobs/uploads/", endpoint, repository) +} + +func buildChunkBlobUploadURL(endpoint, location, digest string, lastChunk bool) (string, error) { + url, err := url.Parse(location) + if err != nil { + return "", err + } + q := url.Query() + if lastChunk { + q.Set("digest", digest) + } + url.RawQuery = q.Encode() + if url.IsAbs() { + return url.String(), nil + } + // the "relativeurls" is enabled in registry + return endpoint + url.String(), nil +} + +func buildMonolithicBlobUploadURL(endpoint, location, digest string) (string, error) { + url, err := url.Parse(location) + if err != nil { + return "", err + } + q := url.Query() + q.Set("digest", digest) + url.RawQuery = q.Encode() + if url.IsAbs() { + return url.String(), nil + } + // the "relativeurls" is enabled in registry + return endpoint + url.String(), nil +} diff --git a/registry/app/remote/clients/registry/interceptor/interceptor.go b/registry/app/remote/clients/registry/interceptor/interceptor.go new file mode 100644 index 000000000..6804c38fb --- /dev/null +++ b/registry/app/remote/clients/registry/interceptor/interceptor.go @@ -0,0 +1,24 @@ +// Source: https://github.com/goharbor/harbor + +// Copyright 2016 Project Harbor Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package interceptor + +import "net/http" + +// Interceptor intercepts the request. +type Interceptor interface { + Intercept(req *http.Request) error +} diff --git a/registry/app/remote/controller/proxy/controller.go b/registry/app/remote/controller/proxy/controller.go new file mode 100644 index 000000000..0792bfd61 --- /dev/null +++ b/registry/app/remote/controller/proxy/controller.go @@ -0,0 +1,377 @@ +// Source: https://github.com/goharbor/harbor + +// Copyright 2016 Project Harbor Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. 
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package proxy
+
+import (
+	"bytes"
+	"context"
+	"fmt"
+	"io"
+	"net/url"
+	"sync"
+	"time"
+
+	"github.com/harness/gitness/app/api/request"
+	store2 "github.com/harness/gitness/app/store"
+	"github.com/harness/gitness/encrypt"
+	"github.com/harness/gitness/registry/app/common/lib/errors"
+	"github.com/harness/gitness/registry/app/manifest"
+	"github.com/harness/gitness/registry/app/pkg"
+	"github.com/harness/gitness/registry/app/pkg/commons"
+	"github.com/harness/gitness/registry/types"
+
+	"github.com/distribution/distribution/v3/registry/api/errcode"
+	"github.com/opencontainers/go-digest"
+	"github.com/rs/zerolog/log"
+)
+
+const (
+	// wait longer than a manifest (maxManifestWait) because a manifest list depends on its manifests being ready.
+	maxManifestListWait = 20
+	maxManifestWait     = 10
+	sleepIntervalSec    = 20
+	// keep manifest list in cache for one week.
+)
+
+var (
+	// ctl is the global proxy controller instance.
+	ctl  Controller
+	once sync.Once
+)
+
+// Controller defines the operations related to pull-through proxying.
+type Controller interface {
+	// UseLocalBlob checks whether the blob should be served from the localRegistry copy.
+	UseLocalBlob(ctx context.Context, art pkg.RegistryInfo) bool
+	// UseLocalManifest checks whether the manifest should be served from the localRegistry copy.
+	UseLocalManifest(
+		ctx context.Context,
+		art pkg.RegistryInfo,
+		remote RemoteInterface,
+		acceptHeader []string,
+		ifNoneMatchHeader []string,
+	) (bool, *ManifestList, error)
+	// ProxyBlob proxies the blob request to the remote server; proxy is the upstream
+	// proxy configuration and art is the RegistryInfo that includes the digest of the blob.
+	ProxyBlob(
+		ctx context.Context,
+		secretStore store2.SecretStore,
+		encrypter encrypt.Encrypter,
+		art pkg.RegistryInfo,
+		repoKey string,
+		proxy types.UpstreamProxy,
+	) (int64, io.ReadCloser, error)
+	// ProxyManifest proxies the manifest request to the remote server;
+	// art is the RegistryInfo that includes the tag or digest of the manifest.
+	ProxyManifest(
+		ctx context.Context,
+		art pkg.RegistryInfo,
+		remote RemoteInterface,
+		repoKey string,
+		imageName string,
+		acceptHeader []string,
+		ifNoneMatchHeader []string,
+	) (manifest.Manifest, error)
+	// HeadManifest sends a manifest HEAD request to the remote server.
+	HeadManifest(ctx context.Context, art pkg.RegistryInfo, remote RemoteInterface) (bool, *manifest.Descriptor, error)
+	// EnsureTag ensures a tag exists for the digest.
+	EnsureTag(
+		ctx context.Context,
+		rsHeaders *commons.ResponseHeaders,
+		info pkg.RegistryInfo,
+		acceptHeader []string,
+		ifNoneMatchHeader []string,
+	) error
+}
+
+type controller struct {
+	// blobCtl blob.Controller
+	// artifactCtl artifact.Controller.
+	localRegistry         registryInterface
+	localManifestRegistry registryManifestInterface
+	// cache cache.Cache
+	// handlerRegistry map[string]ManifestCacheHandler.
+}
+
+// ControllerInstance returns the proxy controller instance.
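+// The controller is created only once (guarded by sync.Once); subsequent calls
+// ignore the arguments and return the already-initialized instance.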
+func ControllerInstance(l registryInterface, lm registryManifestInterface) Controller {
+	once.Do(
+		func() {
+			ctl = &controller{
+				localRegistry:         l,
+				localManifestRegistry: lm,
+			}
+		},
+	)
+
+	return ctl
+}
+
+func (c *controller) EnsureTag(
+	ctx context.Context,
+	rsHeaders *commons.ResponseHeaders,
+	info pkg.RegistryInfo,
+	acceptHeader []string,
+	ifNoneMatchHeader []string,
+) error {
+	// search the digest in cache and query with trimmed digest
+
+	_, desc, mfst, err := c.localRegistry.PullManifest(ctx, info, acceptHeader, ifNoneMatchHeader)
+	if len(err) > 0 {
+		return err[0]
+	}
+
+	// FIXME: need to pick the tag properly.
+	e := c.localManifestRegistry.DBTag(ctx, mfst, desc.Digest, info.Reference, info.RegIdentifier, rsHeaders, info)
+	if e != nil {
+		log.Error().Err(e).Msgf("Error in ensuring tag: %s", e)
+	}
+	return e
+}
+
+func (c *controller) UseLocalBlob(ctx context.Context, art pkg.RegistryInfo) bool {
+	if len(art.Digest) == 0 {
+		return false
+	}
+	// TODO: Get from Local storage.
+	_, _, _, _, _, e := c.localRegistry.GetBlob(ctx, art)
+	return e == nil
+}
+
+// ManifestList holds the raw manifest content together with its digest and content type.
+type ManifestList struct {
+	Content     []byte
+	Digest      string
+	ContentType string
+}
+
+// UseLocalManifest checks whether the manifest can be found in the localRegistry.
+// The returned error is nil when the manifest is not found locally and the
+// request needs to be delegated to the remote registry; it is a NotFoundError
+// when the manifest is not found in the remote registry either, which the
+// framework captures and returns to the client as a 404.
+func (c *controller) UseLocalManifest(
+	ctx context.Context,
+	art pkg.RegistryInfo,
+	remote RemoteInterface,
+	acceptHeaders []string,
+	ifNoneMatchHeader []string,
+) (bool, *ManifestList, error) {
+	// TODO: get from DB
+	_, d, man, e := c.localRegistry.PullManifest(ctx, art, acceptHeaders, ifNoneMatchHeader)
+	if len(e) > 0 {
+		return false, nil, nil
+	}
+
+	remoteRepo := getRemoteRepo(art)
+	exist, desc, err := remote.ManifestExist(remoteRepo, getReference(art)) // HEAD.
+	// guard against a nil descriptor: the manifest may be missing on the remote.
+	if desc != nil {
+		log.Info().Msgf("Manifest exist: %t %s %d %s", exist, desc.Digest.String(), desc.Size, desc.MediaType)
+	}
+	// TODO: Check for rate limit error.
+	if err != nil {
+		if errors.IsRateLimitError(err) { // if rate limited, use the localRegistry copy if it exists, otherwise return the error.
+			return true, nil, nil
+		}
+		return false, nil, err
+	}
+
+	// TODO: Delete if does not exist on remote.
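+	// The local deletion below is best-effort: it runs in a goroutine and its
+	// error, if any, is dropped.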
+ if !exist || desc == nil { + go func() { + c.localRegistry.DeleteManifest(ctx, art) + }() + return false, nil, errors.NotFoundError(fmt.Errorf("registry %v, tag %v not found", art.RegIdentifier, art.Tag)) + } + + log.Info().Msgf("Manifest: %s %s", man, getReference(art)) + mediaType, payload, _ := man.Payload() + + return true, &ManifestList{payload, d.Digest.String(), mediaType}, nil +} + +func ByteToReadCloser(b []byte) io.ReadCloser { + reader := bytes.NewReader(b) + readCloser := io.NopCloser(reader) + return readCloser +} + +func (c *controller) ProxyManifest( + ctx context.Context, + art pkg.RegistryInfo, + remote RemoteInterface, + repoKey string, + imageName string, + acceptHeader []string, + ifNoneMatchHeader []string, +) (manifest.Manifest, error) { + var man manifest.Manifest + remoteRepo := getRemoteRepo(art) + ref := getReference(art) + man, dig, err := remote.Manifest(remoteRepo, ref) + if err != nil { + if errors.IsNotFoundErr(err) { + log.Info().Msgf("TODO: Delete manifest %s from localRegistry registry", dig) + // go func() { + // c.localRegistry.DeleteManifest(remoteRepo, art.Tag) + // }() + } + return man, err + } + ct, payload, err := man.Payload() + log.Info().Msgf("Content type: %s", ct) + if err != nil { + return man, err + } + + // Push manifest in background. + go func(_, ct string) { + session, _ := request.AuthSessionFrom(ctx) + ctx2 := request.WithAuthSession(context.Background(), session) + var count = 0 + for n := 0; n < maxManifestWait; n++ { + time.Sleep(sleepIntervalSec * time.Second) + count++ + log.Info().Msgf("Current retry=%v artifact: %v:%v", count, repoKey, imageName) + _, des, _, e := c.localRegistry.PullManifest(ctx2, art, acceptHeader, ifNoneMatchHeader) + if e != nil { + log.Info().Stack().Err(err).Msgf("failed to get manifest during remote cache update, error %v", err) + } + // Push manifest to localRegistry when pull with digest, or artifact not found, or digest mismatch. + errs := []error{} + if len(art.Tag) == 0 || e != nil || des.Digest.String() != dig { + artInfo := art + if len(artInfo.Digest) == 0 { + artInfo.Digest = dig + } + // Push manifest to localRegistry. + _, errs = c.localRegistry.PutManifest(ctx2, art, ct, ByteToReadCloser(payload), int64(len(payload))) + } + + // Query artifact after push. + if e == nil || commons.IsEmpty(errs) { + _, _, _, err := c.localRegistry.PullManifest(ctx2, art, acceptHeader, ifNoneMatchHeader) + if err != nil { + log.Error().Stack().Msgf("failed to get manifest, error %v", err) + } else { + log.Info().Msgf( + "Completed manifest push to localRegistry registry. 
Image: %s, Tag: %s, Digest: %s", + art.Image, art.Tag, art.Digest, + ) + return + } + } + // if e != nil { + // TODO: Place to send events + // SendPullEvent(bCtx, a, art.Tag, operator) + // } + } + }("System", ct) + + return man, nil +} + +func (c *controller) HeadManifest( + _ context.Context, + art pkg.RegistryInfo, + remote RemoteInterface, +) (bool, *manifest.Descriptor, error) { + remoteRepo := getRemoteRepo(art) + ref := getReference(art) + return remote.ManifestExist(remoteRepo, ref) +} + +func (c *controller) ProxyBlob( + ctx context.Context, + secretStore store2.SecretStore, + encrypter encrypt.Encrypter, + art pkg.RegistryInfo, + repoKey string, + proxy types.UpstreamProxy, +) (int64, io.ReadCloser, error) { + remoteImage := getRemoteRepo(art) + log.Debug().Msgf("The blob doesn't exist, proxy the request to the target server, url:%v", remoteImage) + + rHelper, err := NewRemoteHelper(ctx, secretStore, encrypter, repoKey, proxy) + if err != nil { + return 0, nil, err + } + + size, bReader, err := rHelper.BlobReader(remoteImage, art.Digest) + if err != nil { + log.Error().Stack().Err(err).Msgf("failed to pull blob, error %v", err) + return 0, nil, errcode.ErrorCodeBlobUnknown.WithDetail(art.Digest) + } + desc := manifest.Descriptor{Size: size, Digest: digest.Digest(art.Digest)} + go func(art pkg.RegistryInfo) { + // Cloning Context. + session, _ := request.AuthSessionFrom(ctx) + ctx2 := request.WithAuthSession(context.Background(), session) + err := c.putBlobToLocal(ctx2, art, remoteImage, repoKey, desc, rHelper) + if err != nil { + log.Error().Stack().Err(err).Msgf("error while putting blob to localRegistry registry, %v", err) + } + log.Info().Msgf("Successfully updated the cache for digest %s", art.Digest) + }(art) + return size, bReader, nil +} + +func (c *controller) putBlobToLocal( + ctx context.Context, + art pkg.RegistryInfo, + image string, + localRepo string, + desc manifest.Descriptor, + r RemoteInterface, +) error { + log.Debug(). 
+ Msgf( + "Put blob to localRegistry registry!, sourceRepo:%v, localRepo:%v, digest: %v", image, localRepo, + desc.Digest, + ) + cl, bReader, err := r.BlobReader(image, string(desc.Digest)) + if err != nil { + log.Error().Stack().Err(err).Msgf("failed to create blob reader, error %v", err) + return err + } + defer bReader.Close() + headers, errs := c.localRegistry.InitBlobUpload(ctx, art, "", "") + if len(errs) > 0 { + log.Error().Stack().Err(err).Msgf("failed to init blob upload, error %v", errs) + return errs[0] + } + + location, uuid := headers.Headers["Location"], headers.Headers["Docker-Upload-UUID"] + parsedURL, err := url.Parse(location) + if err != nil { + log.Error().Err(err).Msgf("Error parsing URL: %s", err) + return err + } + stateToken := parsedURL.Query().Get("_state") + art.SetReference(uuid) + c.localRegistry.PushBlob(ctx, art, bReader, cl, stateToken) + return err +} + +func getRemoteRepo(art pkg.RegistryInfo) string { + return art.Image +} + +func getReference(art pkg.RegistryInfo) string { + if len(art.Digest) > 0 { + return art.Digest + } + return art.Tag +} diff --git a/registry/app/remote/controller/proxy/inflight.go b/registry/app/remote/controller/proxy/inflight.go new file mode 100644 index 000000000..3a2093872 --- /dev/null +++ b/registry/app/remote/controller/proxy/inflight.go @@ -0,0 +1,48 @@ +// Source: https://github.com/goharbor/harbor + +// Copyright 2016 Project Harbor Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package proxy + +import "sync" + +type inflightRequest struct { + mu sync.Mutex + reqMap map[string]interface{} +} + +var inflightChecker = &inflightRequest{ + reqMap: make(map[string]interface{}), +} + +// addRequest if the artifact already exist in the inflightRequest, return false +// else return true. +func (in *inflightRequest) addRequest(artifact string) (suc bool) { + in.mu.Lock() + defer in.mu.Unlock() + _, ok := in.reqMap[artifact] + if ok { + // Skip some following operation if it is in reqMap. + return false + } + in.reqMap[artifact] = 1 + return true +} + +func (in *inflightRequest) removeRequest(artifact string) { + in.mu.Lock() + defer in.mu.Unlock() + delete(in.reqMap, artifact) +} diff --git a/registry/app/remote/controller/proxy/inflight_test.go b/registry/app/remote/controller/proxy/inflight_test.go new file mode 100644 index 000000000..93c3624ab --- /dev/null +++ b/registry/app/remote/controller/proxy/inflight_test.go @@ -0,0 +1,33 @@ +// Source: https://github.com/goharbor/harbor + +// Copyright 2016 Project Harbor Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+// See the License for the specific language governing permissions and +// limitations under the License. + +package proxy + +import ( + "testing" + + "github.com/stretchr/testify/assert" +) + +func TestInflightRequest(t *testing.T) { + artName := "hello-world:latest" + inflightChecker.addRequest(artName) + _, ok := inflightChecker.reqMap[artName] + assert.True(t, ok) + inflightChecker.removeRequest(artName) + _, exist := inflightChecker.reqMap[artName] + assert.False(t, exist) +} diff --git a/registry/app/remote/controller/proxy/local.go b/registry/app/remote/controller/proxy/local.go new file mode 100644 index 000000000..c4702576f --- /dev/null +++ b/registry/app/remote/controller/proxy/local.go @@ -0,0 +1,89 @@ +// Source: https://github.com/goharbor/harbor + +// Copyright 2016 Project Harbor Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package proxy + +import ( + "context" + "io" + + "github.com/harness/gitness/registry/app/manifest" + "github.com/harness/gitness/registry/app/pkg" + "github.com/harness/gitness/registry/app/pkg/commons" + "github.com/harness/gitness/registry/app/storage" + + "github.com/opencontainers/go-digest" +) + +// registryInterface defines operations related to localRegistry registry under proxy mode. +type registryInterface interface { + Base() error + PullManifest( + ctx context.Context, + artInfo pkg.RegistryInfo, + acceptHeaders []string, + ifNoneMatchHeader []string, + ) ( + responseHeaders *commons.ResponseHeaders, + descriptor manifest.Descriptor, + manifest manifest.Manifest, + Errors []error, + ) + PutManifest( + ctx context.Context, + artInfo pkg.RegistryInfo, + mediaType string, + body io.ReadCloser, + length int64, + ) (*commons.ResponseHeaders, []error) + DeleteManifest( + ctx context.Context, + artInfo pkg.RegistryInfo, + ) (errs []error, responseHeaders *commons.ResponseHeaders) + GetBlob( + ctx2 context.Context, + artInfo pkg.RegistryInfo, + ) ( + responseHeaders *commons.ResponseHeaders, fr *storage.FileReader, + size int64, readCloser io.ReadCloser, redirectURL string, + Errors []error, + ) + InitBlobUpload( + ctx context.Context, + artInfo pkg.RegistryInfo, + fromRepo, mountDigest string, + ) (*commons.ResponseHeaders, []error) + PushBlob( + ctx2 context.Context, + artInfo pkg.RegistryInfo, + body io.ReadCloser, + contentLength int64, + stateToken string, + ) (*commons.ResponseHeaders, []error) +} + +// registryInterface defines operations related to localRegistry registry under proxy mode. 
+type registryManifestInterface interface { + DBTag( + ctx context.Context, + mfst manifest.Manifest, + d digest.Digest, + tag string, + repoKey string, + headers *commons.ResponseHeaders, + info pkg.RegistryInfo, + ) error +} diff --git a/registry/app/remote/controller/proxy/remote.go b/registry/app/remote/controller/proxy/remote.go new file mode 100644 index 000000000..8db9241d4 --- /dev/null +++ b/registry/app/remote/controller/proxy/remote.go @@ -0,0 +1,114 @@ +// Source: https://github.com/goharbor/harbor + +// Copyright 2016 Project Harbor Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package proxy + +import ( + "io" + + "github.com/harness/gitness/app/store" + "github.com/harness/gitness/encrypt" + api "github.com/harness/gitness/registry/app/api/openapi/contracts/artifact" + "github.com/harness/gitness/registry/app/manifest" + "github.com/harness/gitness/registry/app/remote/adapter" + "github.com/harness/gitness/registry/types" + + "github.com/rs/zerolog/log" + "golang.org/x/net/context" + + _ "github.com/harness/gitness/registry/app/remote/adapter/dockerhub" // This is required to init docker adapter +) + +const DockerHubURL = "https://registry-1.docker.io" + +// RemoteInterface defines operations related to remote repository under proxy. +type RemoteInterface interface { + // BlobReader create a reader for remote blob. + BlobReader(registry, dig string) (int64, io.ReadCloser, error) + // Manifest get manifest by reference. + Manifest(registry string, ref string) (manifest.Manifest, string, error) + // ManifestExist checks manifest exist, if exist, return digest. + ManifestExist(registry string, ref string) (bool, *manifest.Descriptor, error) + // ListTags returns all tags of the registry. + ListTags(registry string) ([]string, error) +} + +type remoteHelper struct { + repoKey string + // TODO: do we need image name here also? + registry adapter.ArtifactRegistry + upstreamProxy types.UpstreamProxy + URL string +} + +// NewRemoteHelper create a remote interface. +func NewRemoteHelper( + ctx context.Context, + secretStore store.SecretStore, + encrypter encrypt.Encrypter, + repoKey string, + proxy types.UpstreamProxy, +) (RemoteInterface, error) { + if proxy.Source == string(api.UpstreamConfigSourceDockerhub) { + proxy.RepoURL = DockerHubURL + } + r := &remoteHelper{ + repoKey: repoKey, + upstreamProxy: proxy, + } + if err := r.init(ctx, secretStore, encrypter); err != nil { + return nil, err + } + return r, nil +} + +func (r *remoteHelper) init(ctx context.Context, secretStore store.SecretStore, encrypter encrypt.Encrypter) error { + if r.registry != nil { + return nil + } + + // TODO add health check. 
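+	// Only the "docker" factory is expected to be registered here; it is wired
+	// in by the blank dockerhub adapter import above.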
+ factory, err := adapter.GetFactory("docker") + if err != nil { + return err + } + adp, err := factory.Create(ctx, secretStore, encrypter, r.upstreamProxy) + if err != nil { + return err + } + reg, ok := adp.(adapter.ArtifactRegistry) + if !ok { + log.Warn().Msgf("Error: adp is not of type adapter.ArtifactRegistry") + } + r.registry = reg + return nil +} + +func (r *remoteHelper) BlobReader(registry, dig string) (int64, io.ReadCloser, error) { + return r.registry.PullBlob(registry, dig) +} + +func (r *remoteHelper) Manifest(registry string, ref string) (manifest.Manifest, string, error) { + return r.registry.PullManifest(registry, ref) +} + +func (r *remoteHelper) ManifestExist(registry string, ref string) (bool, *manifest.Descriptor, error) { + return r.registry.ManifestExist(registry, ref) +} + +func (r *remoteHelper) ListTags(registry string) ([]string, error) { + return r.registry.ListTags(registry) +} diff --git a/registry/app/storage/blobs.go b/registry/app/storage/blobs.go new file mode 100644 index 000000000..d4e4c9f95 --- /dev/null +++ b/registry/app/storage/blobs.go @@ -0,0 +1,161 @@ +// Source: https://github.com/distribution/distribution + +// Copyright 2014 https://github.com/distribution/distribution Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package storage + +import ( + "context" + "errors" + "fmt" + "io" + + "github.com/harness/gitness/registry/app/manifest" + + "github.com/distribution/reference" + "github.com/opencontainers/go-digest" +) + +var ( + // ErrBlobExists returned when blob already exists. + ErrBlobExists = errors.New("blob exists") + + // ErrBlobDigestUnsupported when blob digest is an unsupported version. + ErrBlobDigestUnsupported = errors.New("unsupported blob digest") + + // ErrBlobUnknown when blob is not found. + ErrBlobUnknown = errors.New("unknown blob") + + // ErrBlobUploadUnknown returned when upload is not found. + ErrBlobUploadUnknown = errors.New("blob upload unknown") + + // ErrBlobInvalidLength returned when the blob has an expected length on + // commit, meaning mismatched with the descriptor or an invalid value. + ErrBlobInvalidLength = errors.New("blob invalid length") +) + +// BlobInvalidDigestError returned when digest check fails. +type BlobInvalidDigestError struct { + Digest digest.Digest + Reason error +} + +func (err BlobInvalidDigestError) Error() string { + return fmt.Sprintf( + "invalid digest for referenced layer: %v, %v", + err.Digest, err.Reason, + ) +} + +// BlobMountedError returned when a blob is mounted from another repository +// instead of initiating an upload session. +type BlobMountedError struct { + From reference.Canonical + Descriptor manifest.Descriptor +} + +func (err BlobMountedError) Error() string { + return fmt.Sprintf( + "blob mounted from: %v to: %v", + err.From, err.Descriptor, + ) +} + +// BlobWriter provides a handle for inserting data into a blob store. +// Instances should be obtained from BlobWriteService.Writer and +// BlobWriteService.Resume. 
If supported by the store, a writer can be +// recovered with the id. +type BlobWriter interface { + io.WriteCloser + + // Size returns the number of bytes written to this blob. + Size() int64 + + // ID returns the identifier for this writer. The ID can be used with the + // Blob service to later resume the write. + ID() string + + // Commit completes the blob writer process. The content is verified + // against the provided provisional descriptor, which may result in an + // error. Depending on the implementation, written data may be validated + // against the provisional descriptor fields. If MediaType is not present, + // the implementation may reject the commit or assign "application/octet- + // stream" to the blob. The returned descriptor may have a different + // digest depending on the blob store, referred to as the canonical + // descriptor. + Commit(ctx context.Context, pathPrefix string, provisional manifest.Descriptor) ( + canonical manifest.Descriptor, err error, + ) + + // Cancel ends the blob write without storing any data and frees any + // associated resources. Any data written thus far will be lost. Cancel + // implementations should allow multiple calls even after a commit that + // result in a no-op. This allows use of Cancel in a defer statement, + // increasing the assurance that it is correctly called. + Cancel(ctx context.Context) error +} + +// OciBlobStore represent the entire suite of blob related operations. Such an +// implementation can access, read, write, delete and serve blobs. +type OciBlobStore interface { + + // ServeBlobInternal attempts to serve the blob, identified by dgst, via http. The + // service may decide to redirect the client elsewhere or serve the data + // directly. + // + // This handler only issues successful responses, such as 2xx or 3xx, + // meaning it serves data or issues a redirect. If the blob is not + // available, an error will be returned and the caller may still issue a + // response. + // + // The implementation may serve the same blob from a different digest + // domain. The appropriate headers will be set for the blob, unless they + // have already been set by the caller. + ServeBlobInternal( + ctx context.Context, + pathPrefix string, + dgst digest.Digest, + headers map[string]string, + method string, + ) (*FileReader, string, int64, error) + + Delete(ctx context.Context, pathPrefix string, dgst digest.Digest) error + + // Stat provides metadata about a blob identified by the digest. If the + // blob is unknown to the describer, ErrBlobUnknown will be returned. + Stat(ctx context.Context, pathPrefix string, dgst digest.Digest) (manifest.Descriptor, error) + + // Get returns the entire blob identified by digest along with the descriptor. + Get(ctx context.Context, pathPrefix string, dgst digest.Digest) ([]byte, error) + + // Open provides an [io.ReadSeekCloser] to the blob identified by the provided + // descriptor. If the blob is not known to the service, an error is returned. + Open(ctx context.Context, pathPrefix string, dgst digest.Digest) (io.ReadSeekCloser, error) + + // Put inserts the content p into the blob service, returning a descriptor + // or an error. + Put(ctx context.Context, pathPrefix string, p []byte) (manifest.Descriptor, error) + + // Create allocates a new blob writer to add a blob to this service. The + // returned handle can be written to and later resumed using an opaque + // identifier. 
With this approach, one can Close and Resume a BlobWriter + // multiple times until the BlobWriter is committed or cancelled. + Create(ctx context.Context) (BlobWriter, error) + + // Resume attempts to resume a write to a blob, identified by an id. + Resume(ctx context.Context, id string) (BlobWriter, error) + + Path() string +} diff --git a/registry/app/storage/blobwriter.go b/registry/app/storage/blobwriter.go new file mode 100644 index 000000000..663894c5a --- /dev/null +++ b/registry/app/storage/blobwriter.go @@ -0,0 +1,373 @@ +// Source: https://github.com/distribution/distribution + +// Copyright 2014 https://github.com/distribution/distribution Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package storage + +import ( + "context" + "errors" + "fmt" + "io" + "path" + "time" + + "github.com/harness/gitness/registry/app/dist_temp/dcontext" + "github.com/harness/gitness/registry/app/driver" + "github.com/harness/gitness/registry/app/manifest" + + "github.com/opencontainers/go-digest" + "github.com/rs/zerolog/log" +) + +var errResumableDigestNotAvailable = errors.New("resumable digest not available") + +const ( + // digestSha256Empty is the canonical sha256 digest of empty data. + digestSha256Empty = "sha256:e3b0c44298fc1c149afbf4c8996fb92427ae41e4649b934ca495991b7852b855" +) + +// blobWriter is used to control the various aspects of resumable +// blob upload. +type blobWriter struct { + ctx context.Context + blobStore *ociBlobStore + + id string + digester digest.Digester + written int64 // track the write to digester + + fileWriter driver.FileWriter + driver driver.StorageDriver + path string + + resumableDigestEnabled bool + committed bool +} + +var _ BlobWriter = &blobWriter{} + +// ID returns the identifier for this upload. +func (bw *blobWriter) ID() string { + return bw.id +} + +// Commit marks the upload as completed, returning a valid descriptor. The +// final size and digest are checked against the first descriptor provided. +func (bw *blobWriter) Commit(ctx context.Context, pathPrefix string, desc manifest.Descriptor) ( + manifest.Descriptor, error, +) { + dcontext.GetLogger(ctx, log.Debug()).Msg("(*blobWriter).Commit") + + if err := bw.fileWriter.Commit(ctx); err != nil { + return manifest.Descriptor{}, err + } + + bw.Close() + desc.Size = bw.Size() + + canonical, err := bw.validateBlob(ctx, desc) + if err != nil { + return manifest.Descriptor{}, err + } + + if err := bw.moveBlob(ctx, pathPrefix, canonical); err != nil { + return manifest.Descriptor{}, err + } + + if err := bw.removeResources(ctx); err != nil { + return manifest.Descriptor{}, err + } + + bw.committed = true + return canonical, nil +} + +// Cancel the blob upload process, releasing any resources associated with +// the writer and canceling the operation. 
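+// Per the BlobWriter contract, Cancel may be called multiple times, even after
+// Commit, and results in a no-op, so it is safe to use in a defer.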
+func (bw *blobWriter) Cancel(ctx context.Context) error { + dcontext.GetLogger(ctx, log.Debug()).Msg("(*blobWriter).Cancel") + if err := bw.fileWriter.Cancel(ctx); err != nil { + return err + } + + if err := bw.Close(); err != nil { + dcontext.GetLogger(ctx, log.Error()).Msgf("error closing blobwriter: %s", err) + } + + return bw.removeResources(ctx) +} + +func (bw *blobWriter) Size() int64 { + return bw.fileWriter.Size() +} + +func (bw *blobWriter) Write(p []byte) (int, error) { + // Ensure that the current write offset matches how many bytes have been + // written to the digester. If not, we need to update the digest state to + // match the current write position. + if err := bw.resumeDigest(bw.blobStore.ctx); err != nil && !errors.Is(err, errResumableDigestNotAvailable) { + return 0, err + } + + _, err := bw.fileWriter.Write(p) + if err != nil { + return 0, err + } + + n, err := bw.digester.Hash().Write(p) + bw.written += int64(n) + + return n, err +} + +func (bw *blobWriter) Close() error { + if bw.committed { + return errors.New("blobwriter close after commit") + } + + if err := bw.storeHashState(bw.blobStore.ctx); err != nil && !errors.Is(err, errResumableDigestNotAvailable) { + return err + } + + return bw.fileWriter.Close() +} + +// validateBlob checks the data against the digest, returning an error if it +// does not match. The canonical descriptor is returned. +func (bw *blobWriter) validateBlob(ctx context.Context, desc manifest.Descriptor) (manifest.Descriptor, error) { + var ( + verified, fullHash bool + canonical digest.Digest + ) + + if desc.Digest == "" { + // if no descriptors are provided, we have nothing to validate + // against. We don't really want to support this for the registry. + return manifest.Descriptor{}, BlobInvalidDigestError{ + Reason: fmt.Errorf("cannot validate against empty digest"), + } + } + + var size int64 + + // Stat the on disk file + if fi, err := bw.driver.Stat(ctx, bw.path); err != nil { + if errors.As(err, &driver.PathNotFoundError{}) { + desc.Size = 0 + } else { + // Any other error we want propagated up the stack. + return manifest.Descriptor{}, err + } + } else { + if fi.IsDir() { + return manifest.Descriptor{}, fmt.Errorf("unexpected directory at upload location %q", bw.path) + } + + size = fi.Size() + } + + if desc.Size > 0 { + if desc.Size != size { + return manifest.Descriptor{}, ErrBlobInvalidLength + } + } else { + // if provided 0 or negative length, we can assume caller doesn't know or + // care about length. + desc.Size = size + } + + if err := bw.resumeDigest(ctx); err == nil { + canonical = bw.digester.Digest() + + if canonical.Algorithm() == desc.Digest.Algorithm() { + // Common case: client and server prefer the same canonical digest + // algorithm - currently SHA256. + verified = desc.Digest == canonical + } else { + // The client wants to use a different digest algorithm. They'll just + // have to be patient and wait for us to download and re-hash the + // uploaded content using that digest algorithm. + fullHash = true + } + } else if errors.Is(err, errResumableDigestNotAvailable) { + // Not using resumable digests, so we need to hash the entire layer. + fullHash = true + } else { + return manifest.Descriptor{}, err + } + + if fullHash && bw.written == size && digest.Canonical == desc.Digest.Algorithm() { + // a fantastic optimization: if the the written data and the size are + // the same, we don't need to read the data from the backend. 
This is
+	// because we've written the entire file in the lifecycle of the
+	// current instance.
+		canonical = bw.digester.Digest()
+		verified = desc.Digest == canonical
+	}
+
+	if fullHash && !verified {
+		// If the check based on size fails, we fall back to the slowest of
+		// paths. We may be able to make the size-based check a stronger
+		// guarantee, so this may be defensive.
+		digester := digest.Canonical.Digester()
+		verifier := desc.Digest.Verifier()
+
+		// Read the file from the backend driver and validate it.
+		fr, err := NewFileReader(ctx, bw.driver, bw.path, desc.Size)
+		if err != nil {
+			return manifest.Descriptor{}, err
+		}
+		defer fr.Close()
+
+		tr := io.TeeReader(fr, digester.Hash())
+
+		if _, err := io.Copy(verifier, tr); err != nil {
+			return manifest.Descriptor{}, err
+		}
+
+		canonical = digester.Digest()
+		verified = verifier.Verified()
+	}
+	if !verified {
+		dcontext.GetLoggerWithFields(
+			ctx, log.Ctx(ctx).Error(),
+			map[interface{}]interface{}{
+				"canonical": canonical,
+				"provided":  desc.Digest,
+			}, "canonical", "provided",
+		).
+			Msg("canonical digest does not match provided digest")
+		return manifest.Descriptor{}, BlobInvalidDigestError{
+			Digest: desc.Digest,
+			Reason: fmt.Errorf("content does not match digest"),
+		}
+	}
+
+	// update desc with canonical hash
+	desc.Digest = canonical
+
+	if desc.MediaType == "" {
+		desc.MediaType = "application/octet-stream"
+	}
+
+	return desc, nil
+}
+
+// moveBlob moves the data into its final, hash-qualified destination,
+// identified by dgst. The layer should be validated before commencing the
+// move.
+func (bw *blobWriter) moveBlob(ctx context.Context, pathPrefix string, desc manifest.Descriptor) error {
+	blobPath, err := pathFor(
+		blobDataPathSpec{
+			digest: desc.Digest,
+			path:   pathPrefix,
+		},
+	)
+	if err != nil {
+		return err
+	}
+
+	// Check for existence
+	if _, err := bw.blobStore.driver.Stat(ctx, blobPath); err != nil {
+		log.Ctx(ctx).Info().Msgf("Error type: %T, value: %v\n", err, err)
+		if !errors.As(err, &driver.PathNotFoundError{}) {
+			return err
+		}
+	} else {
+		// If the path exists, we can assume that the content has already
+		// been uploaded, since the blob storage is content-addressable.
+		// While it may be corrupted, detection of such corruption belongs
+		// elsewhere.
+		return nil
+	}
+
+	// If no data was received, we may not actually have a file on disk. Check
+	// the size here and write a zero-length file to blobPath if this is the
+	// case. For the most part, this should only ever happen with zero-length
+	// blobs.
+	if _, err := bw.blobStore.driver.Stat(ctx, bw.path); err != nil {
+		if errors.As(err, &driver.PathNotFoundError{}) {
+			if desc.Digest == digestSha256Empty {
+				return bw.blobStore.driver.PutContent(ctx, blobPath, []byte{})
+			}
+
+			// We let this fail during the move below.
+			log.Ctx(ctx).Warn().
+				Interface("upload.id", bw.ID()).
+				Interface("digest", desc.Digest).
+				Msg("attempted to move zero-length content with non-zero digest")
+		} else {
+			return err // unrelated error
+		}
+	}
+
+	return bw.blobStore.driver.Move(ctx, bw.path, blobPath)
+}
+
+// removeResources should clean up all resources associated with the upload
+// instance. An error will be returned if the clean up cannot proceed. If the
+// resources are already not present, no error will be returned.
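+// It deletes the upload's containing directory, which also drops any stored
+// hash-state files used for resumable digests.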
+func (bw *blobWriter) removeResources(ctx context.Context) error { + dataPath, err := pathFor( + uploadDataPathSpec{ + path: bw.blobStore.rootParentRef, + repoName: bw.blobStore.repoKey, + id: bw.id, + }, + ) + if err != nil { + return err + } + + // Resolve and delete the containing directory, which should include any + // upload related files. + dirPath := path.Dir(dataPath) + if err := bw.blobStore.driver.Delete(ctx, dirPath); err != nil { + if !errors.As(err, &driver.PathNotFoundError{}) { + // This should be uncommon enough such that returning an error + // should be okay. At this point, the upload should be mostly + // complete, but perhaps the backend became unaccessible. + dcontext.GetLogger(ctx, log.Error()).Msgf("unable to delete layer upload resources %q: %v", dirPath, err) + return err + } + } + + return nil +} + +func (bw *blobWriter) Reader() (io.ReadCloser, error) { + try := 1 + for try <= 5 { + _, err := bw.driver.Stat(bw.ctx, bw.path) + if err == nil { + break + } + if errors.As(err, &driver.PathNotFoundError{}) { + dcontext.GetLogger(bw.ctx, log.Debug()).Msgf("Nothing found on try %d, sleeping...", try) + time.Sleep(1 * time.Second) + try++ + } else { + return nil, err + } + } + + readCloser, err := bw.driver.Reader(bw.ctx, bw.path, 0) + if err != nil { + return nil, err + } + + return readCloser, nil +} diff --git a/registry/app/storage/blobwriter_resumable.go b/registry/app/storage/blobwriter_resumable.go new file mode 100644 index 000000000..e4a05a000 --- /dev/null +++ b/registry/app/storage/blobwriter_resumable.go @@ -0,0 +1,167 @@ +// Source: https://github.com/distribution/distribution + +// Copyright 2014 https://github.com/distribution/distribution Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +//go:build !noresumabledigest +// +build !noresumabledigest + +package storage + +import ( + "context" + "encoding" + "errors" + "fmt" + "hash" + "path" + "strconv" + + storagedriver "github.com/harness/gitness/registry/app/driver" + + "github.com/rs/zerolog/log" +) + +// resumeDigest attempts to restore the state of the internal hash function +// by loading the most recent saved hash state equal to the current size of the blob. +func (bw *blobWriter) resumeDigest(ctx context.Context) error { + if !bw.resumableDigestEnabled { + return errResumableDigestNotAvailable + } + + h, ok := bw.digester.Hash().(encoding.BinaryUnmarshaler) + if !ok { + return errResumableDigestNotAvailable + } + + offset := bw.fileWriter.Size() + if offset == bw.written { + // State of digester is already at the requested offset. + return nil + } + + // List hash states from storage backend. + var hashStateMatch hashStateEntry + hashStates, err := bw.getStoredHashStates(ctx) + if err != nil { + return fmt.Errorf("unable to get stored hash states with offset %d: %w", offset, err) + } + + // Find the highest stored hashState with offset equal to + // the requested offset. 
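+	// Each stored state lives at a path whose final segment is the byte offset
+	// it was captured at (see storeHashState below), so an exact match lets us
+	// resume hashing without re-reading earlier bytes.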
+ for _, hashState := range hashStates { + if hashState.offset == offset { + hashStateMatch = hashState + break // Found an exact offset match. + } + } + + if hashStateMatch.offset == 0 { + // No need to load any state, just reset the hasher. + h.(hash.Hash).Reset() + } else { + storedState, err := bw.driver.GetContent(ctx, hashStateMatch.path) + if err != nil { + return err + } + + if err = h.UnmarshalBinary(storedState); err != nil { + return err + } + bw.written = hashStateMatch.offset + } + + // Mind the gap. + if gapLen := offset - bw.written; gapLen > 0 { + return errResumableDigestNotAvailable + } + + return nil +} + +type hashStateEntry struct { + offset int64 + path string +} + +// getStoredHashStates returns a slice of hashStateEntries for this upload. +func (bw *blobWriter) getStoredHashStates(ctx context.Context) ([]hashStateEntry, error) { + uploadHashStatePathPrefix, err := pathFor( + uploadHashStatePathSpec{ + path: bw.blobStore.rootParentRef, + repoName: bw.blobStore.repoKey, + id: bw.id, + alg: bw.digester.Digest().Algorithm(), + list: true, + }, + ) + if err != nil { + return nil, err + } + + paths, err := bw.blobStore.driver.List(ctx, uploadHashStatePathPrefix) + if err != nil { + if ok := errors.As(err, &storagedriver.PathNotFoundError{}); !ok { + return nil, err + } + // Treat PathNotFoundError as no entries. + paths = nil + } + + hashStateEntries := make([]hashStateEntry, 0, len(paths)) + + for _, p := range paths { + pathSuffix := path.Base(p) + // The suffix should be the offset. + offset, err := strconv.ParseInt(pathSuffix, 0, 64) + if err != nil { + log.Error().Msgf("unable to parse offset from upload state path %q: %s", p, err) + } + + hashStateEntries = append(hashStateEntries, hashStateEntry{offset: offset, path: p}) + } + + return hashStateEntries, nil +} + +func (bw *blobWriter) storeHashState(ctx context.Context) error { + if !bw.resumableDigestEnabled { + return errResumableDigestNotAvailable + } + + h, ok := bw.digester.Hash().(encoding.BinaryMarshaler) + if !ok { + return errResumableDigestNotAvailable + } + + state, err := h.MarshalBinary() + if err != nil { + return err + } + + uploadHashStatePath, err := pathFor( + uploadHashStatePathSpec{ + path: bw.blobStore.rootParentRef, + repoName: bw.blobStore.repoKey, + id: bw.id, + alg: bw.digester.Digest().Algorithm(), + offset: bw.written, + }, + ) + if err != nil { + return err + } + + return bw.driver.PutContent(ctx, uploadHashStatePath, state) +} diff --git a/registry/app/storage/errors.go b/registry/app/storage/errors.go new file mode 100644 index 000000000..ed9cf6945 --- /dev/null +++ b/registry/app/storage/errors.go @@ -0,0 +1,149 @@ +// Source: https://github.com/distribution/distribution + +// Copyright 2014 https://github.com/distribution/distribution Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+ +package storage + +import ( + "errors" + "fmt" + "strings" + + "github.com/opencontainers/go-digest" +) + +// ErrAccessDenied is returned when an access to a requested resource is +// denied. +var ErrAccessDenied = errors.New("access denied") + +// ErrUnsupported is returned when an unimplemented or unsupported action is +// performed. +var ErrUnsupported = errors.New("operation unsupported") + +// TagUnknownError is returned if the given tag is not known by the tag service. +type TagUnknownError struct { + Tag string +} + +func (err TagUnknownError) Error() string { + return fmt.Sprintf("unknown tag=%s", err.Tag) +} + +// RegistryUnknownError is returned if the named repository is not known by +// the StorageService. +type RegistryUnknownError struct { + Name string +} + +func (err RegistryUnknownError) Error() string { + return fmt.Sprintf("unknown registry name=%s", err.Name) +} + +// RegistryNameInvalidError should be used to denote an invalid repository +// name. Reason may set, indicating the cause of invalidity. +type RegistryNameInvalidError struct { + Name string + Reason error +} + +func (err RegistryNameInvalidError) Error() string { + return fmt.Sprintf("registry name %q invalid: %v", err.Name, err.Reason) +} + +// ManifestUnknownError is returned if the manifest is not known by the +// StorageService. +type ManifestUnknownError struct { + Name string + Tag string +} + +func (err ManifestUnknownError) Error() string { + return fmt.Sprintf("unknown manifest name=%s tag=%s", err.Name, err.Tag) +} + +// ManifestUnknownRevisionError is returned when a manifest cannot be found by +// revision within a repository. +type ManifestUnknownRevisionError struct { + Name string + Revision digest.Digest +} + +func (err ManifestUnknownRevisionError) Error() string { + return fmt.Sprintf("unknown manifest name=%s revision=%s", err.Name, err.Revision) +} + +// ManifestUnverifiedError is returned when the StorageService is unable to verify +// the manifest. +type ManifestUnverifiedError struct{} + +func (ManifestUnverifiedError) Error() string { + return "unverified manifest" +} + +// ManifestReferencesExceedLimitError is returned when a manifest has too many references. +type ManifestReferencesExceedLimitError struct { + References int + Limit int +} + +func (err ManifestReferencesExceedLimitError) Error() string { + return fmt.Sprintf("%d manifest references exceed reference limit of %d", err.References, err.Limit) +} + +// ManifestPayloadSizeExceedsLimitError is returned when a manifest is bigger than the configured payload +// size limit. +type ManifestPayloadSizeExceedsLimitError struct { + PayloadSize int + Limit int +} + +// Error implements the error interface for ManifestPayloadSizeExceedsLimitError. +func (err ManifestPayloadSizeExceedsLimitError) Error() string { + return fmt.Sprintf("manifest payload size of %d exceeds limit of %d", err.PayloadSize, err.Limit) +} + +// ManifestVerificationErrors provides a type to collect errors encountered +// during manifest verification. Currently, it accepts errors of all types, +// but it may be narrowed to those involving manifest verification. +type ManifestVerificationErrors []error + +func (errs ManifestVerificationErrors) Error() string { + parts := make([]string, 0, len(errs)) + for _, err := range errs { + parts = append(parts, err.Error()) + } + + return fmt.Sprintf("errors verifying manifest: %v", strings.Join(parts, ",")) +} + +// ManifestBlobUnknownError returned when a referenced blob cannot be found. 
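+// It typically surfaces during manifest verification, when a manifest
+// references a layer that was never pushed.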
+type ManifestBlobUnknownError struct {
+	Digest digest.Digest
+}
+
+func (err ManifestBlobUnknownError) Error() string {
+	return fmt.Sprintf("unknown blob %v on manifest", err.Digest)
+}
+
+// ManifestNameInvalidError should be used to denote an invalid manifest
+// name. Reason may be set, indicating the cause of invalidity.
+type ManifestNameInvalidError struct {
+	Name   string
+	Reason error
+}
+
+func (err ManifestNameInvalidError) Error() string {
+	return fmt.Sprintf("manifest name %q invalid: %v", err.Name, err.Reason)
+}
diff --git a/registry/app/storage/filereader.go b/registry/app/storage/filereader.go
new file mode 100644
index 000000000..ac9c6c52b
--- /dev/null
+++ b/registry/app/storage/filereader.go
@@ -0,0 +1,181 @@
+// Source: https://github.com/distribution/distribution

+// Copyright 2014 https://github.com/distribution/distribution Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package storage
+
+import (
+	"bufio"
+	"bytes"
+	"context"
+	"errors"
+	"fmt"
+	"io"
+
+	"github.com/harness/gitness/registry/app/driver"
+)
+
+const fileReaderBufferSize = 4 * 1024 * 1024
+
+// FileReader provides a read seeker interface to files stored in
+// storagedriver. Used to implement part of layer interface and will be used
+// to implement read side of LayerUpload.
+type FileReader struct {
+	driver driver.StorageDriver
+
+	ctx context.Context
+
+	// identifying fields
+	path string
+	size int64 // size is the total size, must be set.
+
+	// mutable fields
+	rc     io.ReadCloser // remote read closer
+	brd    *bufio.Reader // internal buffered io
+	offset int64         // offset is the current read offset
+	err    error         // terminal error, if set, reader is closed
+}
+
+// NewFileReader initializes a file reader for the remote file. The reader
+// takes on the size and path that must be determined externally with a stat
+// call. The reader operates optimistically, assuming that the file is already
+// there.
+func NewFileReader(ctx context.Context, driver driver.StorageDriver, path string, size int64) (*FileReader, error) {
+	return &FileReader{
+		ctx:    ctx,
+		driver: driver,
+		path:   path,
+		size:   size,
+	}, nil
+}
+
+func (fr *FileReader) Read(p []byte) (n int, err error) {
+	if fr.err != nil {
+		return 0, fr.err
+	}
+
+	rd, err := fr.reader()
+	if err != nil {
+		return 0, err
+	}
+
+	n, err = rd.Read(p)
+	fr.offset += int64(n)
+
+	// Simulate io.EOF error if we reach filesize.
+	if err == nil && fr.offset >= fr.size {
+		err = io.EOF
+	}
+
+	return n, err
+}
+
+func (fr *FileReader) Seek(offset int64, whence int) (int64, error) {
+	if fr.err != nil {
+		return 0, fr.err
+	}
+
+	var err error
+	newOffset := fr.offset
+
+	switch whence {
+	case io.SeekCurrent:
+		newOffset += offset
+	case io.SeekEnd:
+		newOffset = fr.size + offset
+	case io.SeekStart:
+		newOffset = offset
+	}
+
+	if newOffset < 0 {
+		err = fmt.Errorf("cannot seek to negative position")
+	} else {
+		if fr.offset != newOffset {
+			fr.reset()
+		}
+
+		// No problems, set the offset.
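+		// Note that seeking past the end is not rejected here; a later Read at
+		// such an offset simply returns io.EOF.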
+ fr.offset = newOffset + } + + return fr.offset, err +} + +func (fr *FileReader) Close() error { + return fr.closeWithErr(fmt.Errorf("FileReader: closed")) +} + +// reader prepares the current reader at the current offset, ensuring it's +// buffered and ready to go. +func (fr *FileReader) reader() (io.Reader, error) { + if fr.err != nil { + return nil, fr.err + } + + if fr.rc != nil { + return fr.brd, nil + } + + // If we don't have a reader, open one up. + rc, err := fr.driver.Reader(fr.ctx, fr.path, fr.offset) + if err != nil { + if errors.As(err, &driver.PathNotFoundError{}) { + return io.NopCloser(bytes.NewReader([]byte{})), nil + } + return nil, err + } + + fr.rc = rc + + if fr.brd == nil { + fr.brd = bufio.NewReaderSize(fr.rc, fileReaderBufferSize) + } else { + fr.brd.Reset(fr.rc) + } + + return fr.brd, nil +} + +// reset resets the reader, forcing the read method to open up a new +// connection and rebuild the buffered reader. This should be called when the +// offset and the reader will become out of sync, such as during a seek +// operation. +func (fr *FileReader) reset() { + if fr.err != nil { + return + } + if fr.rc != nil { + fr.rc.Close() + fr.rc = nil + } +} + +func (fr *FileReader) closeWithErr(err error) error { + if fr.err != nil { + return fr.err + } + + fr.err = err + + // close and release reader chain + if fr.rc != nil { + fr.rc.Close() + } + + fr.rc = nil + fr.brd = nil + + return fr.err +} diff --git a/registry/app/storage/gcstoragelient.go b/registry/app/storage/gcstoragelient.go new file mode 100644 index 000000000..bad0042d0 --- /dev/null +++ b/registry/app/storage/gcstoragelient.go @@ -0,0 +1,51 @@ +// Source: https://github.com/distribution/distribution + +// Copyright 2014 https://github.com/distribution/distribution Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package storage + +import ( + "context" + + "github.com/harness/gitness/registry/app/driver" + + "github.com/opencontainers/go-digest" + "github.com/rs/zerolog/log" +) + +type GcStorageClient struct { + StorageDeleter driver.StorageDeleter +} + +func NewGcStorageClient(storageDeleter driver.StorageDeleter) *GcStorageClient { + return &GcStorageClient{ + StorageDeleter: storageDeleter, + } +} + +// RemoveBlob removes a blob from the filesystem.
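// A garbage-collection worker might drive it roughly like this (sketch; the
// dgst and rootRef values are illustrative):
//
//	gc := NewGcStorageClient(deleter)
//	if err := gc.RemoveBlob(ctx, dgst, rootRef); err != nil {
//		log.Ctx(ctx).Warn().Err(err).Msg("blob delete failed")
//	}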
+func (sc *GcStorageClient) RemoveBlob(ctx context.Context, dgst digest.Digest, rootParentRef string) error { + blobPath, err := pathFor(blobPathSpec{digest: dgst, path: rootParentRef}) + if err != nil { + return err + } + + log.Ctx(ctx).Info().Msgf("deleting blob from storage, digest: %s, path: %s", dgst.String(), rootParentRef) + if err := sc.StorageDeleter.Delete(ctx, blobPath); err != nil { + return err + } + + return nil +} diff --git a/registry/app/storage/io.go b/registry/app/storage/io.go new file mode 100644 index 000000000..15c880bb9 --- /dev/null +++ b/registry/app/storage/io.go @@ -0,0 +1,87 @@ +// Source: https://github.com/distribution/distribution + +// Copyright 2014 https://github.com/distribution/distribution Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package storage + +import ( + "context" + "errors" + "io" + + "github.com/harness/gitness/registry/app/driver" +) + +const ( + maxBlobGetSize = 4 * 1024 * 1024 +) + +func getContent(ctx context.Context, driver driver.StorageDriver, p string) ([]byte, error) { + r, err := driver.Reader(ctx, p, 0) + if err != nil { + return nil, err + } + defer r.Close() + + return readAllLimited(r, maxBlobGetSize) +} + +func readAllLimited(r io.Reader, limit int64) ([]byte, error) { + r = limitReader(r, limit) + return io.ReadAll(r) +} + +// limitReader returns a new reader limited to n bytes. Unlike io.LimitReader, +// this returns an error when the limit is reached. +func limitReader(r io.Reader, n int64) io.Reader { + return &limitedReader{r: r, n: n} +} + +// limitedReader implements a reader that errors when the limit is reached. +// +// Partially cribbed from net/http.MaxBytesReader. +type limitedReader struct { + r io.Reader // underlying reader + n int64 // max bytes remaining + err error // sticky error +} + +func (l *limitedReader) Read(p []byte) (n int, err error) { + if l.err != nil { + return 0, l.err + } + if len(p) == 0 { + return 0, nil + } + // If they asked for a 32KB byte read but only 5 bytes are + // remaining, no need to read 32KB. 6 bytes will answer the + // question of whether we hit the limit or go past it. + if int64(len(p)) > l.n+1 { + p = p[:l.n+1] + } + n, err = l.r.Read(p) + + if int64(n) <= l.n { + l.n -= int64(n) + l.err = err + return n, err + } + + n = int(l.n) + l.n = 0 + + l.err = errors.New("storage: read exceeds limit") + return n, l.err +} diff --git a/registry/app/storage/middleware.go b/registry/app/storage/middleware.go new file mode 100644 index 000000000..dce770f05 --- /dev/null +++ b/registry/app/storage/middleware.go @@ -0,0 +1,26 @@ +// Source: https://github.com/distribution/distribution + +// Copyright 2014 https://github.com/distribution/distribution Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package storage + +var ( + registryoptions []Option +) + +// GetRegistryOptions returns the list of storage Options. +func GetRegistryOptions() []Option { + return registryoptions +} diff --git a/registry/app/storage/ociblobstore.go b/registry/app/storage/ociblobstore.go new file mode 100644 index 000000000..78ecac2c5 --- /dev/null +++ b/registry/app/storage/ociblobstore.go @@ -0,0 +1,302 @@ +// Source: https://github.com/distribution/distribution + +// Copyright 2014 https://github.com/distribution/distribution Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package storage + +import ( + "context" + "errors" + "fmt" + "io" + "time" + + "github.com/harness/gitness/registry/app/dist_temp/dcontext" + "github.com/harness/gitness/registry/app/driver" + "github.com/harness/gitness/registry/app/manifest" + "github.com/harness/gitness/registry/app/pkg/commons" + + "github.com/google/uuid" + "github.com/opencontainers/go-digest" + "github.com/rs/zerolog/log" +) + +const blobCacheControlMaxAge = 365 * 24 * time.Hour + +type ociBlobStore struct { + repoKey string + driver driver.StorageDriver + // only to be used where context can't come through method args + ctx context.Context + deleteEnabled bool + resumableDigestEnabled bool + pathFn func(pathPrefix string, dgst digest.Digest) (string, error) + redirect bool // allows disabling RedirectURL redirects + rootParentRef string +} + +var _ OciBlobStore = &ociBlobStore{} + +func (bs *ociBlobStore) Path() string { + return bs.rootParentRef +} + +// Create begins a blob write session, returning a handle.
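// A push handler pairs this with Resume for chunked uploads (sketch;
// assumes BlobWriter keeps distribution's ID/Write/Commit shape):
//
//	bw, err := store.Create(ctx)          // new session, server-generated ID
//	// ... stream some bytes with bw.Write, reply with the upload location ...
//	bw, err = store.Resume(ctx, uploadID) // a later PATCH continues the session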
+func (bs *ociBlobStore) Create(ctx context.Context) (BlobWriter, error) { + dcontext.GetLogger(ctx, log.Ctx(ctx).Debug()).Msg("(*ociBlobStore).Create") + uuid := uuid.NewString() + + path, err := pathFor( + uploadDataPathSpec{ + path: bs.rootParentRef, + repoName: bs.repoKey, + id: uuid, + }, + ) + if err != nil { + return nil, err + } + + return bs.newBlobUpload(ctx, uuid, path, false) +} + +func (bs *ociBlobStore) Resume(ctx context.Context, id string) (BlobWriter, error) { + dcontext.GetLogger(ctx, log.Ctx(ctx).Debug()).Msg("(*ociBlobStore).Resume") + + path, err := pathFor( + uploadDataPathSpec{ + path: bs.rootParentRef, + repoName: bs.repoKey, + id: id, + }, + ) + if err != nil { + return nil, err + } + + return bs.newBlobUpload(ctx, id, path, true) +} + +func (bs *ociBlobStore) Delete(_ context.Context, _ string, _ digest.Digest) error { + return ErrUnsupported +} + +func (bs *ociBlobStore) ServeBlobInternal( + ctx context.Context, + pathPrefix string, + dgst digest.Digest, + headers map[string]string, + method string, +) (*FileReader, string, int64, error) { + desc, err := bs.Stat(ctx, pathPrefix, dgst) + if err != nil { + return nil, "", 0, err + } + if desc.MediaType != "" { + // Set the repository local content type. + headers[commons.HeaderContentType] = desc.MediaType + } + size := desc.Size + path, err := bs.pathFn(pathPrefix, desc.Digest) + if err != nil { + return nil, "", size, err + } + + if bs.redirect { + redirectURL, err := bs.driver.RedirectURL(ctx, method, path) + if err != nil { + return nil, "", size, err + } + if redirectURL != "" { + // Redirect to storage URL. + // http.Redirect(w, r, redirectURL, http.StatusTemporaryRedirect) + return nil, redirectURL, size, nil + } + // Fallback to serving the content directly. + } + + br, err := NewFileReader(ctx, bs.driver, path, desc.Size) + if err != nil { + if br != nil { + br.Close() + } + return nil, "", size, err + } + + headers[commons.HeaderEtag] = fmt.Sprintf(`"%s"`, desc.Digest) + // If-None-Match handled by ServeContent + headers[commons.HeaderCacheControl] = fmt.Sprintf( + "max-age=%.f", + blobCacheControlMaxAge.Seconds(), + ) + + if headers[commons.HeaderDockerContentDigest] == "" { + headers[commons.HeaderDockerContentDigest] = desc.Digest.String() + } + + if headers[commons.HeaderContentType] == "" { + // Set the content type if not already set. + headers[commons.HeaderContentType] = desc.MediaType + } + + if headers[commons.HeaderContentLength] == "" { + // Set the content length if not already set. 
+ headers[commons.HeaderContentLength] = fmt.Sprint(desc.Size) + } + + return br, "", size, err +} + +func (bs *ociBlobStore) Get( + ctx context.Context, pathPrefix string, + dgst digest.Digest, +) ([]byte, error) { + canonical, err := bs.Stat(ctx, pathPrefix, dgst) + if err != nil { + return nil, err + } + + bp, err := bs.pathFn(pathPrefix, canonical.Digest) + if err != nil { + return nil, err + } + + p, err := getContent(ctx, bs.driver, bp) + if err != nil { + if errors.As(err, &driver.PathNotFoundError{}) { + return nil, ErrBlobUnknown + } + return nil, err + } + + return p, nil +} + +func (bs *ociBlobStore) Open( + ctx context.Context, pathPrefix string, + dgst digest.Digest, +) (io.ReadSeekCloser, error) { + desc, err := bs.Stat(ctx, pathPrefix, dgst) + if err != nil { + return nil, err + } + + path, err := bs.pathFn(pathPrefix, desc.Digest) + if err != nil { + return nil, err + } + + return NewFileReader(ctx, bs.driver, path, desc.Size) +} + +// Put stores the content p in the blob store, calculating the digest. +// If the content is already present, only the digest will be returned. +// This should only be used for small objects, such as manifests. +// This is implemented as a convenience for other Put implementations. +func (bs *ociBlobStore) Put( + ctx context.Context, pathPrefix string, + p []byte, +) (manifest.Descriptor, error) { + dgst := digest.FromBytes(p) + desc, err := bs.Stat(ctx, pathPrefix, dgst) + if err == nil { + // content already present + return desc, nil + } else if !errors.Is(err, ErrBlobUnknown) { + dcontext.GetLogger( + ctx, log.Error(), + ).Msgf( + "ociBlobStore: error stating content (%v): %v", dgst, err, + ) + // real error, return it + return manifest.Descriptor{}, err + } + + bp, err := bs.pathFn(pathPrefix, dgst) + if err != nil { + return manifest.Descriptor{}, err + } + + return manifest.Descriptor{ + Size: int64(len(p)), + + MediaType: "application/octet-stream", + Digest: dgst, + }, bs.driver.PutContent(ctx, bp, p) +} + +// Stat returns the descriptor for the blob +// in the main blob store. If this method returns successfully, there is +// a strong guarantee that the blob exists and is available. +func (bs *ociBlobStore) Stat( + ctx context.Context, pathPrefix string, + dgst digest.Digest, +) (manifest.Descriptor, error) { + path, err := pathFor( + blobDataPathSpec{ + digest: dgst, + path: pathPrefix, + }, + ) + if err != nil { + return manifest.Descriptor{}, err + } + + fi, err := bs.driver.Stat(ctx, path) + if err != nil { + if errors.As(err, &driver.PathNotFoundError{}) { + return manifest.Descriptor{}, ErrBlobUnknown + } + return manifest.Descriptor{}, err + } + + if fi.IsDir() { + dcontext.GetLogger( + ctx, log.Warn(), + ).Msgf("blob path should not be a directory: %q", path) + return manifest.Descriptor{}, ErrBlobUnknown + } + + return manifest.Descriptor{ + Size: fi.Size(), + + MediaType: "application/octet-stream", + Digest: dgst, + }, nil +} + +// newBlobUpload allocates a new upload controller with the given state.
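// The trailing boolean selects append mode: Create passes false to start a
// fresh upload, while Resume passes true so the driver's Writer picks up at
// the bytes already persisted for the given upload ID.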
+func (bs *ociBlobStore) newBlobUpload( + ctx context.Context, uuid, + path string, a bool, +) (BlobWriter, error) { + fw, err := bs.driver.Writer(ctx, path, a) + if err != nil { + return nil, err + } + + bw := &blobWriter{ + ctx: ctx, + blobStore: bs, + id: uuid, + digester: digest.Canonical.Digester(), + fileWriter: fw, + driver: bs.driver, + path: path, + resumableDigestEnabled: bs.resumableDigestEnabled, + } + + return bw, nil +} diff --git a/registry/app/storage/paths.go b/registry/app/storage/paths.go new file mode 100644 index 000000000..c50462d8f --- /dev/null +++ b/registry/app/storage/paths.go @@ -0,0 +1,174 @@ +// Source: https://github.com/distribution/distribution + +// Copyright 2014 https://github.com/distribution/distribution Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package storage + +import ( + "fmt" + "path" + "strings" + + "github.com/opencontainers/go-digest" +) + +const ( + storagePathRoot = "/" + docker = "docker" + blobs = "blobs" +) + +func pathFor(spec pathSpec) (string, error) { + rootPrefix := []string{storagePathRoot} + switch v := spec.(type) { + case blobsPathSpec: + blobsPathPrefix := rootPrefix + blobsPathPrefix = append(blobsPathPrefix, blobs) + return path.Join(blobsPathPrefix...), nil + case blobPathSpec: + components, err := digestPathComponents(v.digest, true) + if err != nil { + return "", err + } + blobPathPrefix := rootPrefix + blobPathPrefix = append(blobPathPrefix, v.path, docker, blobs) + return path.Join(append(blobPathPrefix, components...)...), nil + case blobDataPathSpec: + components, err := digestPathComponents(v.digest, true) + if err != nil { + return "", err + } + + components = append(components, "data") + blobPathPrefix := rootPrefix + blobPathPrefix = append(blobPathPrefix, v.path, docker, "blobs") + return path.Join(append(blobPathPrefix, components...)...), nil + + case uploadDataPathSpec: + return path.Join(append(rootPrefix, v.path, docker, "_uploads", v.repoName, v.id, "data")...), nil + case uploadHashStatePathSpec: + offset := fmt.Sprintf("%d", v.offset) + if v.list { + offset = "" // Limit to the prefix for listing offsets. + } + return path.Join( + append( + rootPrefix, v.path, docker, "_uploads", v.repoName, v.id, "hashstates", + string(v.alg), offset, + )..., + ), nil + case repositoriesRootPathSpec: + return path.Join(rootPrefix...), nil + default: + return "", fmt.Errorf("unknown path spec: %#v", v) + } +} + +// pathSpec is a type to mark structs as path specs. There is no +// implementation because we'd like to keep the specs and the mappers +// decoupled. +type pathSpec interface { + pathSpec() +} + +// blobAlgorithmReplacer does some very simple path sanitization for user +// input. Paths should be "safe" before getting this far due to strict digest +// requirements but we can add further path conversion here, if needed. +var blobAlgorithmReplacer = strings.NewReplacer( + "+", "/", + ".", "/", + ";", "/", +) + +// blobsPathSpec contains the path for the blobs directory. 
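// For orientation, the specs below map to concrete paths like these
// (illustrative digest and root values, multilevel digest layout):
//
//	blobDataPathSpec{digest: "sha256:ab12...", path: "acct"}
//	    -> /acct/docker/blobs/sha256/ab/ab12.../data
//	uploadDataPathSpec{path: "acct", repoName: "repo", id: "uid"}
//	    -> /acct/docker/_uploads/repo/uid/data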
+type blobsPathSpec struct{} + +func (blobsPathSpec) pathSpec() {} + +// blobPathSpec contains the path for the registry global blob store. +type blobPathSpec struct { + digest digest.Digest + path string +} + +func (blobPathSpec) pathSpec() {} + +// blobDataPathSpec contains the path for the StorageService global blob store. For +// now, this contains layer data, exclusively. +type blobDataPathSpec struct { + digest digest.Digest + path string +} + +func (blobDataPathSpec) pathSpec() {} + +// uploadDataPathSpec defines the path parameters of the data file for +// uploads. +type uploadDataPathSpec struct { + path string + repoName string + id string +} + +func (uploadDataPathSpec) pathSpec() {} + +// uploadHashStatePathSpec defines the path parameters for the file that stores +// the hash function state of an upload at a specific byte offset. If `list` is +// set, then the path mapper will generate a list prefix for all hash state +// offsets for the upload identified by the name, id, and alg. +type uploadHashStatePathSpec struct { + path string + repoName string + id string + alg digest.Algorithm + offset int64 + list bool +} + +func (uploadHashStatePathSpec) pathSpec() {} + +// repositoriesRootPathSpec describes the root of repositories. +type repositoriesRootPathSpec struct{} + +func (repositoriesRootPathSpec) pathSpec() {} + +// digestPathComponents provides a consistent path breakdown for a given +// digest. For a generic digest, it will be as follows: +// +// <algorithm>/<hex digest> +// +// If multilevel is true, the first two bytes of the digest will separate +// groups of digest folders. It will be as follows: +// +// <algorithm>/<first two bytes of digest>/<full digest> +func digestPathComponents(dgst digest.Digest, multilevel bool) ([]string, error) { + if err := dgst.Validate(); err != nil { + return nil, err + } + + algorithm := blobAlgorithmReplacer.Replace(string(dgst.Algorithm())) + hex := dgst.Encoded() + prefix := []string{algorithm} + + var suffix []string + + if multilevel { + suffix = append(suffix, hex[:2]) + } + + suffix = append(suffix, hex) + + return append(prefix, suffix...), nil +} diff --git a/registry/app/storage/storageservice.go b/registry/app/storage/storageservice.go new file mode 100644 index 000000000..6880e9c17 --- /dev/null +++ b/registry/app/storage/storageservice.go @@ -0,0 +1,93 @@ +// Source: https://github.com/distribution/distribution + +// Copyright 2014 https://github.com/distribution/distribution Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package storage + +import ( + "context" + + "github.com/harness/gitness/registry/app/driver" + + "github.com/opencontainers/go-digest" +) + +type Service struct { + deleteEnabled bool + resumableDigestEnabled bool + redirect bool + driver driver.StorageDriver +} + +// Option is the type used for functional options for NewRegistry. +type Option func(*Service) error + +// EnableRedirect is a functional option for NewRegistry. It causes the backend +// blob server to attempt using (StorageDriver).RedirectURL to serve all blobs.
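// Options are applied by NewStorageService, so enabling redirects (and,
// optionally, deletes) is a matter of passing them at construction time
// (usage sketch):
//
//	svc, err := NewStorageService(drv, EnableRedirect, EnableDelete)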
+func EnableRedirect(registry *Service) error { + registry.redirect = true + return nil +} + +// EnableDelete is a functional option for NewRegistry. It enables deletion on +// the registry. +func EnableDelete(registry *Service) error { + registry.deleteEnabled = true + return nil +} + +func NewStorageService(driver driver.StorageDriver, options ...Option) (*Service, error) { + registry := &Service{ + resumableDigestEnabled: true, + driver: driver, + } + + for _, option := range options { + if err := option(registry); err != nil { + return nil, err + } + } + + return registry, nil +} + +func (storage *Service) OciBlobsStore(ctx context.Context, repoKey string, rootParentRef string) OciBlobStore { + return &ociBlobStore{ + repoKey: repoKey, + ctx: ctx, + driver: storage.driver, + pathFn: PathFn, + redirect: storage.redirect, + deleteEnabled: storage.deleteEnabled, + resumableDigestEnabled: storage.resumableDigestEnabled, + rootParentRef: rootParentRef, + } +} + +// PathFn returns the canonical path for the blob identified by digest. The blob +// may or may not exist. +func PathFn(pathPrefix string, dgst digest.Digest) (string, error) { + bp, err := pathFor( + blobDataPathSpec{ + digest: dgst, + path: pathPrefix, + }, + ) + if err != nil { + return "", err + } + + return bp, nil +} diff --git a/registry/app/store/database.go b/registry/app/store/database.go new file mode 100644 index 000000000..e2e286fe6 --- /dev/null +++ b/registry/app/store/database.go @@ -0,0 +1,454 @@ +// Copyright 2023 Harness, Inc. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License.
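Tying the storageservice.go pieces together, a caller would construct the Service once and derive per-registry blob stores from it. A minimal sketch, assuming only the interfaces shown in this change (any driver.StorageDriver implementation can stand in for fsDriver; the registry key and root ref values are illustrative):

```go
import (
	"context"
	"io"

	"github.com/harness/gitness/registry/app/driver"
	"github.com/harness/gitness/registry/app/storage"

	"github.com/opencontainers/go-digest"
)

// openBlob wires a storage driver into the Service and opens one blob for
// reading; "my-registry" and "/acct" are illustrative key/root values.
func openBlob(ctx context.Context, fsDriver driver.StorageDriver, dgst digest.Digest) (io.ReadSeekCloser, error) {
	svc, err := storage.NewStorageService(fsDriver, storage.EnableRedirect)
	if err != nil {
		return nil, err
	}
	blobs := svc.OciBlobsStore(ctx, "my-registry", "/acct")
	// Path() returns the root parent ref, which doubles as the path prefix.
	return blobs.Open(ctx, blobs.Path(), dgst)
}
```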
+ +package store + +import ( + "context" + "time" + + "github.com/harness/gitness/registry/app/api/openapi/contracts/artifact" + "github.com/harness/gitness/registry/types" + + "github.com/lib/pq" + "github.com/opencontainers/go-digest" +) + +type MediaTypesRepository interface { + MapMediaType(ctx context.Context, mediaType string) (int64, error) + MediaTypeExists(ctx context.Context, mediaType string) (bool, error) +} + +type BlobRepository interface { + FindByID(ctx context.Context, id int64) (*types.Blob, error) + FindByDigestAndRootParentID( + ctx context.Context, d digest.Digest, + rootParentID int64, + ) (*types.Blob, error) + FindByDigestAndRepoID( + ctx context.Context, d digest.Digest, repoID int64, + imageName string, + ) (*types.Blob, error) + CreateOrFind(ctx context.Context, b *types.Blob) (*types.Blob, error) + DeleteByID(ctx context.Context, id int64) error + ExistsBlob( + ctx context.Context, repoID int64, d digest.Digest, + image string, + ) (bool, error) +} + +type CleanupPolicyRepository interface { + // GetIDsByRegistryID gets the CleanupPolicy IDs specified by registry ID + GetIDsByRegistryID(ctx context.Context, id int64) (ids []int64, err error) + // GetByRegistryID gets the CleanupPolicies specified by registry ID + GetByRegistryID( + ctx context.Context, + id int64, + ) (cleanupPolicies *[]types.CleanupPolicy, err error) + // Create a CleanupPolicy + Create( + ctx context.Context, + cleanupPolicy *types.CleanupPolicy, + ) (id int64, err error) + // Delete the CleanupPolicy specified by ID + Delete(ctx context.Context, id int64) (err error) + // ModifyCleanupPolicies updates the given CleanupPolicies. + ModifyCleanupPolicies( + ctx context.Context, + cleanupPolicies *[]types.CleanupPolicy, ids []int64, + ) error +} + +type ManifestRepository interface { + // FindAll finds all manifests. + FindAll(ctx context.Context) (types.Manifests, error) + // Count counts all manifests. + Count(ctx context.Context) (int, error) + // LayerBlobs finds layer blobs associated with a manifest, + // through the `layers` relationship entity. + LayerBlobs(ctx context.Context, m *types.Manifest) (types.Blobs, error) + // References finds all manifests directly + // referenced by a manifest (if any). + References(ctx context.Context, m *types.Manifest) (types.Manifests, error) + // Create saves a new Manifest. ID value is updated in given request object + Create(ctx context.Context, m *types.Manifest) error + // CreateOrFind attempts to create a manifest. If the manifest already exists + // (same digest in the scope of a given repository) + // that record is loaded from the database into m. + // This is similar to a repositoryStore.FindManifestByDigest followed by + // a Create, but without being prone to race conditions on write + // operations between the corresponding read (FindManifestByDigest) + // and write (Create) operations. + // Separate Find* and Create method calls should be preferred + // to this when race conditions are not a concern.
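	// A caller sketch (hypothetical; field population elided):
	//
	//	m := &types.Manifest{ /* digest, payload, registry scope */ }
	//	if err := manifests.CreateOrFind(ctx, m); err != nil { ... }
	//	// m now carries the stored ID whether it was inserted or found.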
+ CreateOrFind(ctx context.Context, m *types.Manifest) error + AssociateLayerBlob(ctx context.Context, m *types.Manifest, b *types.Blob) error + DissociateLayerBlob(ctx context.Context, m *types.Manifest, b *types.Blob) error + Delete(ctx context.Context, registryID, id int64) error + FindManifestByDigest( + ctx context.Context, repoID int64, imageName string, + digest types.Digest, + ) (*types.Manifest, error) + FindManifestByTagName( + ctx context.Context, repoID int64, imageName string, + tag string, + ) (*types.Manifest, error) + FindManifestPayloadByTagName( + ctx context.Context, + parentID int64, + repoKey string, + imageName string, + version string, + ) (*types.Payload, error) + GetManifestPayload( + ctx context.Context, + parentID int64, + repoKey string, + imageName string, + digest types.Digest, + ) (*types.Payload, error) + Get(ctx context.Context, manifestID int64) (*types.Manifest, error) + DeleteManifest( + ctx context.Context, repoID int64, + imageName string, d digest.Digest, + ) (bool, error) + ListManifestsBySubject( + ctx context.Context, repoID int64, + id int64, + ) (types.Manifests, error) + ListManifestsBySubjectDigest( + ctx context.Context, repoID int64, + digest types.Digest, + ) (types.Manifests, error) +} + +type ManifestReferenceRepository interface { + AssociateManifest( + ctx context.Context, ml *types.Manifest, + m *types.Manifest, + ) error + DissociateManifest( + ctx context.Context, ml *types.Manifest, + m *types.Manifest, + ) error +} + +type LayerRepository interface { + AssociateLayerBlob(ctx context.Context, m *types.Manifest, b *types.Blob) error +} + +type TagRepository interface { + // CreateOrUpdate upserts a tag. A tag with a given name + // on a given repository may not exist (in which case it should be + // inserted), may already exist and point to the same manifest + // (in which case nothing needs to be done), or may already exist but + // point to a different manifest (in which case it should be updated).
+ CreateOrUpdate(ctx context.Context, t *types.Tag) error + LockTagByNameForUpdate( + ctx context.Context, repoID int64, + name string, + ) (bool, error) + DeleteTagByName( + ctx context.Context, repoID int64, + name string, + ) (bool, error) + DeleteTagByManifestID( + ctx context.Context, repoID int64, + manifestID int64, + ) (bool, error) + TagsPaginated( + ctx context.Context, repoID int64, image string, + filters types.FilterParams, + ) ([]*types.Tag, error) + HasTagsAfterName( + ctx context.Context, repoID int64, + filters types.FilterParams, + ) (bool, error) + + GetAllArtifactsByParentID( + ctx context.Context, parentID int64, + packageTypes *[]string, sortByField string, + sortByOrder string, limit int, offset int, search string, + labels []string, + ) (*[]types.ArtifactMetadata, error) + + CountAllArtifactsByParentID( + ctx context.Context, parentID int64, + packageTypes *[]string, search string, + labels []string, + ) (int64, error) + + GetAllArtifactsByRepo( + ctx context.Context, parentID int64, repoKey string, + sortByField string, sortByOrder string, + limit int, offset int, search string, labels []string, + ) (*[]types.ArtifactMetadata, error) + + GetLatestTagMetadata( + ctx context.Context, + parentID int64, + repoKey string, + imageName string, + ) (*types.ArtifactMetadata, error) + + GetLatestTagName( + ctx context.Context, parentID int64, repoKey string, + imageName string, + ) (string, error) + + GetTagMetadata( + ctx context.Context, + parentID int64, + repoKey string, + imageName string, + name string, + ) (*types.TagMetadata, error) + + CountAllArtifactsByRepo( + ctx context.Context, parentID int64, repoKey string, + search string, labels []string, + ) (int64, error) + + GetTagDetail( + ctx context.Context, repoID int64, imageName string, + name string, + ) (*types.TagDetail, error) + + GetLatestTag(ctx context.Context, repoID int64, imageName string) (*types.Tag, error) + + GetAllTagsByRepoAndImage( + ctx context.Context, + parentID int64, + repoKey string, + image string, + sortByField string, + sortByOrder string, + limit int, + offset int, + search string, + ) (*[]types.TagMetadata, error) + + CountAllTagsByRepoAndImage( + ctx context.Context, parentID int64, repoKey string, + image string, search string, + ) (int64, error) + FindTag( + ctx context.Context, repoID int64, imageName string, + name string, + ) (*types.Tag, error) +} + +// UpstreamProxyConfig holds the DB record of an upstream proxy config.
+type UpstreamProxyConfig struct { + ID int64 + RegistryID int64 + Source string + URL string + AuthType string + UserName string + Password string + Token string + CreatedAt time.Time + UpdatedAt time.Time +} + +type UpstreamProxyConfigRepository interface { + // Get the upstreamproxy specified by ID + Get(ctx context.Context, id int64) (upstreamProxy *types.UpstreamProxy, err error) + + // GetByRegistryIdentifier gets the upstreamproxy specified by parent ID and registry key + GetByRegistryIdentifier( + ctx context.Context, + parentID int64, + repoKey string, + ) (upstreamProxy *types.UpstreamProxy, err error) + + // GetByParentID gets the upstreamproxies specified by parent ID + GetByParentID(ctx context.Context, parentID string) ( + upstreamProxies *[]types.UpstreamProxy, + err error, + ) + + // Create an upstreamProxyConfig + Create(ctx context.Context, upstreamproxyRecord *types.UpstreamProxyConfig) ( + id int64, + err error, + ) + + // Delete the upstreamProxyConfig specified by parent ID and registry key + Delete(ctx context.Context, parentID int64, repoKey string) (err error) + + // Update updates the upstreamproxy. + Update(ctx context.Context, upstreamproxyRecord *types.UpstreamProxyConfig) (err error) + + GetAll( + ctx context.Context, + parentID int64, + packageTypes []string, + sortByField string, + sortByOrder string, + limit int, + offset int, + search string, + ) (upstreamProxies *[]types.UpstreamProxy, err error) + + CountAll( + ctx context.Context, parentID string, packageTypes []string, + search string, + ) (count int64, err error) +} + +type RegistryMetadata struct { + RegIdentifier string + Description string + PackageType artifact.PackageType + Type artifact.RegistryType + LastModified time.Time + URL string + Labels pq.StringArray + ArtifactCount int64 + DownloadCount int64 + Size int64 +} + +type RegistryRepository interface { + // Get the repository specified by ID + Get(ctx context.Context, id int64) (repository *types.Registry, err error) + // GetByIDIn gets the registries specified by IDs + GetByIDIn( + ctx context.Context, parentID int64, + ids []int64, + ) (registries *[]types.Registry, err error) + // GetByParentIDAndName gets the registry specified by parent ID and name + GetByParentIDAndName( + ctx context.Context, parentID int64, + name string, + ) (registry *types.Registry, err error) + GetByRootParentIDAndName( + ctx context.Context, parentID int64, + name string, + ) (registry *types.Registry, err error) + // Create a repository + Create(ctx context.Context, repository *types.Registry) (id int64, err error) + // Delete the repository specified by parent ID and name + Delete(ctx context.Context, parentID int64, name string) (err error) + // Update updates the repository.
+ Update(ctx context.Context, repository *types.Registry) (err error) + + GetAll( + ctx context.Context, + parentID int64, + packageTypes []string, + sortByField string, + sortByOrder string, + limit int, + offset int, + search string, + repoType string, + ) (repos *[]RegistryMetadata, err error) + + CountAll( + ctx context.Context, parentID int64, packageTypes []string, + search string, repoType string, + ) (count int64, err error) + + FetchUpstreamProxyIDs( + ctx context.Context, repokeys []string, + parentID int64, + ) (ids []int64, err error) + + FetchUpstreamProxyKeys(ctx context.Context, ids []int64) (repokeys []string, err error) +} + +type RegistryBlobRepository interface { + LinkBlob( + ctx context.Context, imageName string, + registry *types.Registry, blobID int64, + ) error + UnlinkBlob( + ctx context.Context, imageName string, + registry *types.Registry, blobID int64, + ) (bool, error) +} + +type ArtifactRepository interface { + // Get an Artifact specified by ID + Get(ctx context.Context, id int64) (*types.Artifact, error) + // GetByName gets an Artifact specified by Artifact Name + GetByName( + ctx context.Context, repoID int64, + name string, + ) (*types.Artifact, error) + // Get the Labels specified by Parent ID and Repo + GetLabelsByParentIDAndRepo( + ctx context.Context, parentID int64, + repo string, limit int, offset int, + search string, + ) (labels []string, err error) + // Count the Labels specified by Parent ID and Repo + CountLabelsByParentIDAndRepo( + ctx context.Context, parentID int64, + repo, search string, + ) (count int64, err error) + // GetByRepoAndName gets an Artifact specified by repo and Artifact Name + GetByRepoAndName( + ctx context.Context, parentID int64, + repo string, name string, + ) (*types.Artifact, error) + // Get the Labels specified by Parent ID + GetLabelsByParentID(ctx context.Context, parentID int64) (labels []string, err error) + // Create or update an Artifact + CreateOrUpdate(ctx context.Context, artifact *types.Artifact) error + // Update an Artifact + Update(ctx context.Context, artifact *types.Artifact) (err error) +} + +type ArtifactStatRepository interface { + CreateOrUpdate(ctx context.Context, artifactStat *types.ArtifactStat) error +} + +type GCBlobTaskRepository interface { + FindAll(ctx context.Context) ([]*types.GCBlobTask, error) + FindAndLockBefore( + ctx context.Context, blobID int64, + date time.Time, + ) (*types.GCBlobTask, error) + Count(ctx context.Context) (int, error) + Next(ctx context.Context) (*types.GCBlobTask, error) + Reschedule(ctx context.Context, b *types.GCBlobTask, d time.Duration) error + Postpone(ctx context.Context, b *types.GCBlobTask, d time.Duration) error + IsDangling(ctx context.Context, b *types.GCBlobTask) (bool, error) + Delete(ctx context.Context, b *types.GCBlobTask) error +} + +type GCManifestTaskRepository interface { + FindAll(ctx context.Context) ([]*types.GCManifestTask, error) + FindAndLock( + ctx context.Context, registryID, + manifestID int64, + ) (*types.GCManifestTask, error) + FindAndLockBefore( + ctx context.Context, registryID, manifestID int64, + date time.Time, + ) (*types.GCManifestTask, error) + FindAndLockNBefore( + ctx context.Context, registryID int64, + manifestIDs []int64, date time.Time, + ) ([]*types.GCManifestTask, error) + Count(ctx context.Context) (int, error) + Next(ctx context.Context) (*types.GCManifestTask, error) + Postpone(ctx context.Context, b *types.GCManifestTask, d time.Duration) error + IsDangling(ctx context.Context, b
*types.GCManifestTask) (bool, error) + Delete(ctx context.Context, b *types.GCManifestTask) error +} diff --git a/registry/app/store/database/artifact.go b/registry/app/store/database/artifact.go new file mode 100644 index 000000000..7a9bea5f4 --- /dev/null +++ b/registry/app/store/database/artifact.go @@ -0,0 +1,336 @@ +// Copyright 2023 Harness, Inc. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package database + +import ( + "context" + "database/sql" + "sort" + "time" + + "github.com/harness/gitness/app/api/request" + "github.com/harness/gitness/registry/app/store" + "github.com/harness/gitness/registry/app/store/database/util" + "github.com/harness/gitness/registry/types" + gitness_store "github.com/harness/gitness/store" + databaseg "github.com/harness/gitness/store/database" + "github.com/harness/gitness/store/database/dbtx" + + "github.com/jmoiron/sqlx" + "github.com/pkg/errors" +) + +type ArtifactDao struct { + db *sqlx.DB +} + +func NewArtifactDao(db *sqlx.DB) store.ArtifactRepository { + return &ArtifactDao{ + db: db, + } +} + +type artifactDB struct { + ID int64 `db:"artifact_id"` + Name string `db:"artifact_name"` + RegistryID int64 `db:"artifact_registry_id"` + Labels sql.NullString `db:"artifact_labels"` + Enabled bool `db:"artifact_enabled"` + CreatedAt int64 `db:"artifact_created_at"` + UpdatedAt int64 `db:"artifact_updated_at"` + CreatedBy int64 `db:"artifact_created_by"` + UpdatedBy int64 `db:"artifact_updated_by"` +} + +type artifactLabelDB struct { + Labels sql.NullString `db:"labels"` +} + +func (a ArtifactDao) Get(ctx context.Context, id int64) (*types.Artifact, error) { + q := databaseg.Builder.Select(util.ArrToStringByDelimiter(util.GetDBTagsFromStruct(artifactDB{}), ",")). + From("artifacts"). + Where("artifact_id = ?", id) + + sql, args, err := q.ToSql() + if err != nil { + return nil, errors.Wrap(err, "Failed to convert query to sql") + } + + db := dbtx.GetAccessor(ctx, a.db) + + dst := new(artifactDB) + if err = db.GetContext(ctx, dst, sql, args...); err != nil { + return nil, databaseg.ProcessSQLErrorf(ctx, err, "Failed to get artifact") + } + return a.mapToArtifact(ctx, dst) +} + +func (a ArtifactDao) GetByRepoAndName(ctx context.Context, parentID int64, + repo string, name string) (*types.Artifact, error) { + q := databaseg.Builder.Select("a.artifact_id, a.artifact_name, "+ + " a.artifact_registry_id, a.artifact_labels, a.artifact_created_at, "+ + " a.artifact_updated_at, a.artifact_created_by, a.artifact_updated_by"). + From("artifacts a"). + Join(" registries r ON r.registry_id = a.artifact_registry_id"). + Where("r.registry_parent_id = ? AND r.registry_name = ? 
AND a.artifact_name = ?", + parentID, repo, name) + + sql, args, err := q.ToSql() + if err != nil { + return nil, errors.Wrap(err, "Failed to convert query to sql") + } + + db := dbtx.GetAccessor(ctx, a.db) + + dst := new(artifactDB) + if err = db.GetContext(ctx, dst, sql, args...); err != nil { + return nil, databaseg.ProcessSQLErrorf(ctx, err, "Failed to get artifact") + } + return a.mapToArtifact(ctx, dst) +} + +func (a ArtifactDao) GetByName(ctx context.Context, repoID int64, name string) (*types.Artifact, error) { + q := databaseg.Builder.Select(util.ArrToStringByDelimiter(util.GetDBTagsFromStruct(artifactDB{}), ",")). + From("artifacts"). + Where("artifact_registry_id = ? AND artifact_name = ?", repoID, name) + + sql, args, err := q.ToSql() + if err != nil { + return nil, errors.Wrap(err, "Failed to convert query to sql") + } + + db := dbtx.GetAccessor(ctx, a.db) + + dst := new(artifactDB) + if err = db.GetContext(ctx, dst, sql, args...); err != nil { + return nil, databaseg.ProcessSQLErrorf(ctx, err, "Failed to get artifact") + } + return a.mapToArtifact(ctx, dst) +} + +func (a ArtifactDao) GetLabelsByParentIDAndRepo(ctx context.Context, parentID int64, repo string, + limit int, offset int, search string) (labels []string, err error) { + q := databaseg.Builder.Select("a.artifact_labels as labels"). + From("artifacts a"). + Join("registries r ON r.registry_id = a.artifact_registry_id"). + Where("r.registry_parent_id = ? AND r.registry_name = ?", parentID, repo) + + if search != "" { + q = q.Where("a.artifact_labels LIKE ?", "%"+search+"%") + } + + q = q.OrderBy("a.artifact_labels ASC").Limit(uint64(limit)).Offset(uint64(offset)) + + sql, args, err := q.ToSql() + if err != nil { + return nil, errors.Wrap(err, "Failed to convert query to sql") + } + + dst := []*artifactLabelDB{} + + db := dbtx.GetAccessor(ctx, a.db) + + if err = db.SelectContext(ctx, &dst, sql, args...); err != nil { + return nil, databaseg.ProcessSQLErrorf(ctx, err, "Failed to get artifact labels") + } + + return a.mapToArtifactLabels(dst), nil +} + +func (a ArtifactDao) CountLabelsByParentIDAndRepo(ctx context.Context, parentID int64, repo, + search string) (count int64, err error) { + q := databaseg.Builder.Select("a.artifact_labels as labels"). + From("artifacts a"). + Join("registries r ON r.registry_id = a.artifact_registry_id"). + Where("r.registry_parent_id = ? AND r.registry_name = ?", parentID, repo) + + if search != "" { + q = q.Where("a.artifact_labels LIKE ?", "%"+search+"%") + } + + sql, args, err := q.ToSql() + if err != nil { + return -1, errors.Wrap(err, "Failed to convert query to sql") + } + + db := dbtx.GetAccessor(ctx, a.db) + + dst := []*artifactLabelDB{} + + if err = db.SelectContext(ctx, &dst, sql, args...); err != nil { + return -1, databaseg.ProcessSQLErrorf(ctx, err, "Failed to get artifact labels") + } + + return int64(len(dst)), nil +} + +func (a ArtifactDao) GetLabelsByParentID(ctx context.Context, parentID int64) (labels []string, err error) { + q := databaseg.Builder.Select("a.artifact_labels as labels"). + From("artifacts a"). + Join("registries r ON r.registry_id = a.artifact_registry_id"). 
+ Where("r.registry_parent_id = ?", parentID) + + sql, args, err := q.ToSql() + if err != nil { + return nil, errors.Wrap(err, "Failed to convert query to sql") + } + + db := dbtx.GetAccessor(ctx, a.db) + + dst := []*artifactLabelDB{} + + if err = db.SelectContext(ctx, &dst, sql, args...); err != nil { + return nil, databaseg.ProcessSQLErrorf(ctx, err, "Failed to get artifact labels") + } + + return a.mapToArtifactLabels(dst), nil +} + +func (a ArtifactDao) CreateOrUpdate(ctx context.Context, artifact *types.Artifact) error { + const sqlQuery = ` + INSERT INTO artifacts ( + artifact_registry_id + ,artifact_name + ,artifact_enabled + ,artifact_created_at + ,artifact_updated_at + ,artifact_created_by + ,artifact_updated_by + ) VALUES ( + :artifact_registry_id + ,:artifact_name + ,:artifact_enabled + ,:artifact_created_at + ,:artifact_updated_at + ,:artifact_created_by + ,:artifact_updated_by + ) + ON CONFLICT (artifact_registry_id, artifact_name) + DO UPDATE SET + artifact_enabled = :artifact_enabled + RETURNING artifact_id` + + db := dbtx.GetAccessor(ctx, a.db) + query, arg, err := db.BindNamed(sqlQuery, a.mapToInternalArtifact(ctx, artifact)) + if err != nil { + return databaseg.ProcessSQLErrorf(ctx, err, "Failed to bind artifact object") + } + + if err = db.QueryRowContext(ctx, query, arg...).Scan(&artifact.ID); err != nil && !errors.Is(err, sql.ErrNoRows) { + return databaseg.ProcessSQLErrorf(ctx, err, "Insert query failed") + } + return nil +} + +func (a ArtifactDao) Update(ctx context.Context, artifact *types.Artifact) (err error) { + var sqlQuery = " UPDATE artifacts SET " + util.GetSetDBKeys(artifactDB{}, "artifact_id") + + " WHERE artifact_id = :artifact_id " + + dbArtifact := a.mapToInternalArtifact(ctx, artifact) + + // update Version (used for optimistic locking) and Updated time + dbArtifact.UpdatedAt = time.Now().UnixMilli() + + db := dbtx.GetAccessor(ctx, a.db) + + query, arg, err := db.BindNamed(sqlQuery, dbArtifact) + if err != nil { + return databaseg.ProcessSQLErrorf(ctx, err, "Failed to bind artifact object") + } + + result, err := db.ExecContext(ctx, query, arg...) 
+ if err != nil { + return databaseg.ProcessSQLErrorf(ctx, err, "Failed to update artifact") + } + + count, err := result.RowsAffected() + if err != nil { + return databaseg.ProcessSQLErrorf(ctx, err, "Failed to get number of updated rows") + } + + if count == 0 { + return gitness_store.ErrVersionConflict + } + + return nil +} + +func (a ArtifactDao) mapToInternalArtifact(ctx context.Context, in *types.Artifact) *artifactDB { + session, _ := request.AuthSessionFrom(ctx) + + if in.CreatedAt.IsZero() { + in.CreatedAt = time.Now() + } + if in.CreatedBy == 0 { + in.CreatedBy = session.Principal.ID + } + + in.UpdatedAt = time.Now() + in.UpdatedBy = session.Principal.ID + + sort.Strings(in.Labels) + + return &artifactDB{ + ID: in.ID, + Name: in.Name, + RegistryID: in.RegistryID, + Labels: getEmptySQLString(util.ArrToString(in.Labels)), + Enabled: in.Enabled, + CreatedAt: in.CreatedAt.UnixMilli(), + UpdatedAt: in.UpdatedAt.UnixMilli(), + CreatedBy: in.CreatedBy, + UpdatedBy: in.UpdatedBy, + } +} + +func (a ArtifactDao) mapToArtifact(_ context.Context, dst *artifactDB) (*types.Artifact, error) { + createdBy := dst.CreatedBy + updatedBy := dst.UpdatedBy + return &types.Artifact{ + ID: dst.ID, + Name: dst.Name, + RegistryID: dst.RegistryID, + Labels: util.StringToArr(dst.Labels.String), + Enabled: dst.Enabled, + CreatedAt: time.UnixMilli(dst.CreatedAt), + UpdatedAt: time.UnixMilli(dst.UpdatedAt), + CreatedBy: createdBy, + UpdatedBy: updatedBy, + }, nil +} + +func (a ArtifactDao) mapToArtifactLabels(dst []*artifactLabelDB) []string { + elements := make(map[string]bool) + res := []string{} + for _, labels := range dst { + elements, res = a.mapToArtifactLabel(elements, res, labels) + } + return res +} + +func (a ArtifactDao) mapToArtifactLabel(elements map[string]bool, res []string, + dst *artifactLabelDB) (map[string]bool, []string) { + if dst == nil { + return elements, res + } + labels := util.StringToArr(dst.Labels.String) + for _, label := range labels { + if !elements[label] { + elements[label] = true + res = append(res, label) + } + } + return elements, res +} diff --git a/registry/app/store/database/artifact_stat.go b/registry/app/store/database/artifact_stat.go new file mode 100644 index 000000000..b61903162 --- /dev/null +++ b/registry/app/store/database/artifact_stat.go @@ -0,0 +1,126 @@ +// Copyright 2023 Harness, Inc. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
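The ON CONFLICT clause in ArtifactDao.CreateOrUpdate above makes artifact registration idempotent: the first call inserts the row, later calls only flip artifact_enabled. A caller sketch (values illustrative; ctx must carry an auth session, since mapToInternalArtifact reads the principal from it):

```go
art := &types.Artifact{
	RegistryID: registryID, // illustrative scope
	Name:       "alpine",
	Enabled:    true,
}
if err := artifactDao.CreateOrUpdate(ctx, art); err != nil {
	return err
}
// art.ID is filled in from the RETURNING artifact_id clause.
```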
+ +package database + +import ( + "context" + "database/sql" + "errors" + "time" + + "github.com/harness/gitness/app/api/request" + "github.com/harness/gitness/registry/app/store" + "github.com/harness/gitness/registry/types" + databaseg "github.com/harness/gitness/store/database" + "github.com/harness/gitness/store/database/dbtx" + + "github.com/jmoiron/sqlx" +) + +type ArtifactStatDao struct { + db *sqlx.DB +} + +func NewArtifactStatDao(db *sqlx.DB) store.ArtifactStatRepository { + return &ArtifactStatDao{ + db: db, + } +} + +type artifactStatDB struct { + ID int64 `db:"artifact_stat_id"` + ArtifactID int64 `db:"artifact_stat_artifact_id"` + Date int64 `db:"artifact_stat_date"` + DownloadCount int64 `db:"artifact_stat_download_count"` + UploadBytes int64 `db:"artifact_stat_upload_bytes"` + DownloadBytes int64 `db:"artifact_stat_download_bytes"` + CreatedAt int64 `db:"artifact_stat_created_at"` + UpdatedAt int64 `db:"artifact_stat_updated_at"` + CreatedBy int64 `db:"artifact_stat_created_by"` + UpdatedBy int64 `db:"artifact_stat_updated_by"` +} + +func (a ArtifactStatDao) CreateOrUpdate(ctx context.Context, artifactStat *types.ArtifactStat) error { + const sqlQuery = ` + INSERT INTO artifact_stats ( + artifact_stat_artifact_id + ,artifact_stat_date + ,artifact_stat_download_count + ,artifact_stat_upload_bytes + ,artifact_stat_download_bytes + ,artifact_stat_created_at + ,artifact_stat_updated_at + ,artifact_stat_created_by + ,artifact_stat_updated_by + ) VALUES ( + :artifact_stat_artifact_id + ,:artifact_stat_date + ,:artifact_stat_download_count + ,:artifact_stat_upload_bytes + ,:artifact_stat_download_bytes + ,:artifact_stat_created_at + ,:artifact_stat_updated_at + ,:artifact_stat_created_by + ,:artifact_stat_updated_by + + ) + ON CONFLICT (artifact_stat_artifact_id, artifact_stat_date) + DO UPDATE SET + artifact_stat_download_count = + artifact_stats.artifact_stat_download_count + EXCLUDED.artifact_stat_download_count, + artifact_stat_upload_bytes = + artifact_stats.artifact_stat_upload_bytes + EXCLUDED.artifact_stat_upload_bytes, + artifact_stat_download_bytes = + artifact_stats.artifact_stat_download_bytes + EXCLUDED.artifact_stat_download_bytes + RETURNING artifact_stat_id` + + db := dbtx.GetAccessor(ctx, a.db) + query, arg, err := db.BindNamed(sqlQuery, a.mapToInternalArtifactStat(ctx, artifactStat)) + if err != nil { + return databaseg.ProcessSQLErrorf(ctx, err, "Failed to bind artifact object") + } + + if err = db.QueryRowContext(ctx, query, + arg...).Scan(&artifactStat.ID); err != nil && !errors.Is(err, sql.ErrNoRows) { + return databaseg.ProcessSQLErrorf(ctx, err, "Insert query failed") + } + return nil +} + +func (a ArtifactStatDao) mapToInternalArtifactStat(ctx context.Context, in *types.ArtifactStat) *artifactStatDB { + session, _ := request.AuthSessionFrom(ctx) + if in.CreatedAt.IsZero() { + in.CreatedAt = time.Now() + } + + if in.CreatedBy == 0 { + in.CreatedBy = session.Principal.ID + } + + in.UpdatedAt = time.Now() + + return &artifactStatDB{ + ID: in.ID, + ArtifactID: in.ArtifactID, + Date: in.Date, + DownloadCount: in.DownloadCount, + UploadBytes: in.UploadBytes, + DownloadBytes: in.DownloadBytes, + CreatedAt: in.CreatedAt.UnixMilli(), + UpdatedAt: in.UpdatedAt.UnixMilli(), + CreatedBy: in.CreatedBy, + UpdatedBy: session.Principal.ID, + } +} diff --git a/registry/app/store/database/blob.go b/registry/app/store/database/blob.go new file mode 100644 index 000000000..2547457a5 --- /dev/null +++ b/registry/app/store/database/blob.go @@ -0,0 +1,299 @@ +// Copyright 2023 
Harness, Inc. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package database + +import ( + "context" + "fmt" + "time" + + "github.com/harness/gitness/app/api/request" + "github.com/harness/gitness/registry/app/store" + "github.com/harness/gitness/registry/app/store/database/util" + "github.com/harness/gitness/registry/types" + store2 "github.com/harness/gitness/store" + "github.com/harness/gitness/store/database" + "github.com/harness/gitness/store/database/dbtx" + + "github.com/jmoiron/sqlx" + "github.com/opencontainers/go-digest" + errors2 "github.com/pkg/errors" +) + +type blobDao struct { + db *sqlx.DB + + //FIXME: Arvind: Move this to controller layer later + mtRepository store.MediaTypesRepository +} + +func NewBlobDao(db *sqlx.DB, mtRepository store.MediaTypesRepository) store.BlobRepository { + return &blobDao{ + db: db, + mtRepository: mtRepository, + } +} + +var ( + PrimaryQuery = database.Builder.Select("blobs.blob_id as blob_id", "blob_media_type_id", "mt_media_type", + "blob_digest", "blob_size", "blob_created_at", "blob_root_parent_id"). + From("blobs"). + Join("media_types ON mt_id = blobs.blob_media_type_id") +) + +type blobDB struct { + ID int64 `db:"blob_id"` + RootParentID int64 `db:"blob_root_parent_id"` + Digest []byte `db:"blob_digest"` + MediaTypeID int64 `db:"blob_media_type_id"` + Size int64 `db:"blob_size"` + CreatedAt int64 `db:"blob_created_at"` + CreatedBy int64 `db:"blob_created_by"` +} + +type blobMetadataDB struct { + blobDB + MediaType string `db:"mt_media_type"` +} + +func (bd blobDao) FindByDigestAndRootParentID(ctx context.Context, d digest.Digest, + rootParentID int64) (*types.Blob, error) { + dgst, err := types.NewDigest(d) + if err != nil { + return nil, err + } + + digestBytes, err := util.GetHexDecodedBytes(string(dgst)) + if err != nil { + return nil, err + } + + stmt := PrimaryQuery. + Where("blob_root_parent_id = ?", rootParentID). + Where("blob_digest = ?", digestBytes) + + db := dbtx.GetAccessor(ctx, bd.db) + + dst := new(blobMetadataDB) + sql, args, err := stmt.ToSql() + if err != nil { + return nil, errors2.Wrap(err, "Failed to convert query to sql") + } + + if err = db.GetContext(ctx, dst, sql, args...); err != nil { + return nil, database.ProcessSQLErrorf(ctx, err, "Failed to find blob") + } + + return bd.mapToBlob(dst) +} + +func (bd blobDao) FindByID(ctx context.Context, id int64) (*types.Blob, error) { + stmt := PrimaryQuery. 
+ Where("blob_id = ?", id) + + db := dbtx.GetAccessor(ctx, bd.db) + + dst := new(blobMetadataDB) + sql, args, err := stmt.ToSql() + if err != nil { + return nil, errors2.Wrap(err, "Failed to convert query to sql") + } + + if err = db.GetContext(ctx, dst, sql, args...); err != nil { + return nil, database.ProcessSQLErrorf(ctx, err, "Failed to find blob") + } + + return bd.mapToBlob(dst) +} + +func (bd blobDao) FindByDigestAndRepoID(ctx context.Context, d digest.Digest, repoID int64, + imageName string) (*types.Blob, error) { + dgst, err := types.NewDigest(d) + if err != nil { + return nil, err + } + + digestBytes, err := util.GetHexDecodedBytes(string(dgst)) + if err != nil { + return nil, err + } + + stmt := PrimaryQuery. + Join("registry_blobs ON rblob_blob_id = blobs.blob_id"). + Where("rblob_registry_id = ?", repoID). + Where("rblob_image_name = ?", imageName). + Where("blob_digest = ?", digestBytes) + + db := dbtx.GetAccessor(ctx, bd.db) + + dst := new(blobMetadataDB) + sql, args, err := stmt.ToSql() + if err != nil { + return nil, errors2.Wrap(err, "Failed to convert query to sql") + } + + if err = db.GetContext(ctx, dst, sql, args...); err != nil { + return nil, database.ProcessSQLErrorf(ctx, err, "Failed to find blob") + } + + return bd.mapToBlob(dst) +} + +func (bd blobDao) CreateOrFind(ctx context.Context, b *types.Blob) (*types.Blob, error) { + sqlQuery := `INSERT INTO blobs ( + blob_digest, + blob_root_parent_id, + blob_media_type_id, + blob_size, + blob_created_at, + blob_created_by + ) VALUES ( + :blob_digest, + :blob_root_parent_id, + :blob_media_type_id, + :blob_size, + :blob_created_at, + :blob_created_by + ) ON CONFLICT ( + blob_digest, blob_root_parent_id + ) DO NOTHING + RETURNING blob_id` + + mediaTypeID, err := bd.mtRepository.MapMediaType(ctx, b.MediaType) + if err != nil { + return nil, err + } + b.MediaTypeID = mediaTypeID + + db := dbtx.GetAccessor(ctx, bd.db) + blob, err := mapToInternalBlob(ctx, b) + if err != nil { + return nil, err + } + query, arg, err := db.BindNamed(sqlQuery, blob) + if err != nil { + return nil, database.ProcessSQLErrorf(ctx, err, "Failed to bind repo object") + } + + if err = db.QueryRowContext(ctx, query, arg...).Scan(&b.ID); err != nil { + err := database.ProcessSQLErrorf(ctx, err, "Insert query failed") + if !errors2.Is(err, store2.ErrResourceNotFound) { + return nil, err + } + } + + return bd.FindByDigestAndRootParentID(ctx, b.Digest, b.RootParentID) +} + +func (bd blobDao) DeleteByID(ctx context.Context, id int64) error { + stmt := database.Builder.Delete("blobs"). + Where("blob_id = ?", id) + + sql, args, err := stmt.ToSql() + if err != nil { + return fmt.Errorf("failed to convert purge blob query to sql: %w", err) + } + + db := dbtx.GetAccessor(ctx, bd.db) + + _, err = db.ExecContext(ctx, sql, args...) + if err != nil { + return database.ProcessSQLErrorf(ctx, err, "the delete query failed") + } + + return nil +} + +func (bd blobDao) ExistsBlob(ctx context.Context, repoID int64, + d digest.Digest, image string) (bool, error) { + stmt := database.Builder.Select("EXISTS (SELECT 1 FROM registry_blobs " + + "JOIN blobs as b ON rblob_blob_id = b.blob_id " + + "WHERE rblob_registry_id = ? AND " + + "rblob_image_name = ? 
AND " + + "b.blob_digest = ?)") + + sql, args, err := stmt.ToSql() + if err != nil { + return false, fmt.Errorf("failed to convert exists blob query to sql: %w", err) + } + + var exists bool + db := dbtx.GetAccessor(ctx, bd.db) + newDigest, err := types.NewDigest(d) + if err != nil { + return false, err + } + bytes, err := util.GetHexDecodedBytes(string(newDigest)) + if err != nil { + return false, err + } + args = append(args, repoID, image, bytes) + + if err = db.GetContext(ctx, &exists, sql, args...); err != nil { + return false, database.ProcessSQLErrorf(ctx, err, "Failed to check exists blob") + } + + return exists, nil +} + +func mapToInternalBlob(ctx context.Context, in *types.Blob) (*blobDB, error) { + session, _ := request.AuthSessionFrom(ctx) + if in.CreatedAt.IsZero() { + in.CreatedAt = time.Now() + } + if in.CreatedBy == 0 { + in.CreatedBy = session.Principal.ID + } + + in.CreatedBy = -1 + newDigest, err := types.NewDigest(in.Digest) + if err != nil { + return nil, err + } + + digestBytes, err := util.GetHexDecodedBytes(string(newDigest)) + if err != nil { + return nil, err + } + + return &blobDB{ + ID: in.ID, + RootParentID: in.RootParentID, + MediaTypeID: in.MediaTypeID, + Digest: digestBytes, + Size: in.Size, + CreatedAt: in.CreatedAt.UnixMilli(), + CreatedBy: in.CreatedBy, + }, nil +} + +func (bd blobDao) mapToBlob(dst *blobMetadataDB) (*types.Blob, error) { + createdBy := int64(-1) + dig := types.Digest(util.GetHexEncodedString(dst.Digest)) + parsedDigest, err := dig.Parse() + if err != nil { + return nil, err + } + return &types.Blob{ + ID: dst.ID, + RootParentID: dst.RootParentID, + MediaTypeID: dst.MediaTypeID, + MediaType: dst.MediaType, + Digest: parsedDigest, + Size: dst.Size, + CreatedAt: time.UnixMilli(dst.CreatedAt), + CreatedBy: createdBy, + }, nil +} diff --git a/registry/app/store/database/cleanup_policy.go b/registry/app/store/database/cleanup_policy.go new file mode 100644 index 000000000..6d750b74f --- /dev/null +++ b/registry/app/store/database/cleanup_policy.go @@ -0,0 +1,393 @@ +// Copyright 2023 Harness, Inc. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+ +package database + +import ( + "context" + "database/sql" + "time" + + "github.com/harness/gitness/app/api/request" + "github.com/harness/gitness/registry/app/common/lib/errors" + "github.com/harness/gitness/registry/app/pkg/commons" + "github.com/harness/gitness/registry/app/store" + "github.com/harness/gitness/registry/types" + "github.com/harness/gitness/registry/types/enum" + databaseg "github.com/harness/gitness/store/database" + "github.com/harness/gitness/store/database/dbtx" + + "github.com/jmoiron/sqlx" + "github.com/rs/zerolog/log" +) + +type CleanupPolicyDao struct { + db *sqlx.DB + tx dbtx.Transactor +} + +type CleanupPolicyDB struct { + ID int64 `db:"cp_id"` + RegistryID int64 `db:"cp_registry_id"` + Name string `db:"cp_name"` + ExpiryTimeInMs int64 `db:"cp_expiry_time_ms"` + CreatedAt int64 `db:"cp_created_at"` + UpdatedAt int64 `db:"cp_updated_at"` + CreatedBy int64 `db:"cp_created_by"` + UpdatedBy int64 `db:"cp_updated_by"` +} + +type CleanupPolicyPrefixMappingDB struct { + PrefixID int64 `db:"cpp_id"` + CleanupPolicyID int64 `db:"cpp_cleanup_policy_id"` + Prefix string `db:"cpp_prefix"` + PrefixType enum.PrefixType `db:"cpp_prefix_type"` +} + +type CleanupPolicyJoinMapping struct { + CleanupPolicyDB + CleanupPolicyPrefixMappingDB +} + +func NewCleanupPolicyDao(db *sqlx.DB, tx dbtx.Transactor) store.CleanupPolicyRepository { + return &CleanupPolicyDao{ + db: db, + tx: tx, + } +} + +func (c CleanupPolicyDao) GetIDsByRegistryID(ctx context.Context, id int64) (ids []int64, err error) { + stmt := databaseg.Builder.Select("cp_id").From("cleanup_policies"). + Where("cp_registry_id = ?", id) + db := dbtx.GetAccessor(ctx, c.db) + var res []int64 + query, args, err := stmt.ToSql() + if err != nil { + return nil, err + } + if err = db.SelectContext(ctx, &res, query, args...); err != nil { + if !errors.Is(err, sql.ErrNoRows) { + return nil, databaseg.ProcessSQLErrorf( + ctx, err, + "failed to get cleanup policy ids by registry id %d", id, + ) + } + } + + return res, nil +} + +func (c CleanupPolicyDao) GetByRegistryID( + ctx context.Context, + id int64, +) (cleanupPolicies *[]types.CleanupPolicy, err error) { + stmt := databaseg.Builder.Select( + "cp_id", + "cp_registry_id", + "cp_name", + "cp_expiry_time_ms", + "cp_created_at", + "cp_updated_at", + "cp_created_by", + "cp_updated_by", + "cpp_id", + "cpp_cleanup_policy_id", + "cpp_prefix", + "cpp_prefix_type", + ). + From("cleanup_policies"). + Join("cleanup_policy_prefix_mappings ON cp_id = cpp_cleanup_policy_id"). + Where("cp_registry_id = ?", id) + + db := dbtx.GetAccessor(ctx, c.db) + query, args, err := stmt.ToSql() + if err != nil { + return nil, err + } + + rows, err := db.QueryxContext(ctx, query, args...) 
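+	// Each joined row is one (policy, prefix) pair; mapToCleanupPolicies below
+	// regroups the pairs into types.CleanupPolicy values keyed by cp_id.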
+	if err != nil {
+		return nil, databaseg.ProcessSQLErrorf(
+			ctx, err,
+			"failed to get cleanup policies by registry id %d", id,
+		)
+	}
+
+	defer func(rows *sqlx.Rows) {
+		err := rows.Close()
+		if err != nil {
+			log.Ctx(ctx).Error().Msgf("failed to close rows: %v", err)
+		}
+	}(rows)
+
+	return c.mapToCleanupPolicies(ctx, rows)
+}
+
+func (c CleanupPolicyDao) Create(ctx context.Context, cleanupPolicy *types.CleanupPolicy) (id int64, err error) {
+	const sqlQuery = `
+		INSERT INTO cleanup_policies (
+			cp_registry_id
+			,cp_name
+			,cp_expiry_time_ms
+			,cp_created_at
+			,cp_updated_at
+			,cp_created_by
+			,cp_updated_by
+		) values (
+			:cp_registry_id
+			,:cp_name
+			,:cp_expiry_time_ms
+			,:cp_created_at
+			,:cp_updated_at
+			,:cp_created_by
+			,:cp_updated_by
+		) RETURNING cp_id`
+
+	db := dbtx.GetAccessor(ctx, c.db)
+
+	// insert the cleanup policy first so we get its id
+	query, arg, err := db.BindNamed(sqlQuery, c.mapToInternalCleanupPolicy(ctx, cleanupPolicy))
+	if err != nil {
+		return 0, databaseg.ProcessSQLErrorf(
+			ctx,
+			err, "Failed to bind cleanup policy object",
+		)
+	}
+
+	if err = db.QueryRowContext(ctx, query, arg...).Scan(&cleanupPolicy.ID); err != nil {
+		return 0, databaseg.ProcessSQLErrorf(ctx, err, "Insert query failed")
+	}
+
+	return cleanupPolicy.ID, nil
+}
+
+func (c CleanupPolicyDao) createPrefixMapping(
+	ctx context.Context,
+	mapping CleanupPolicyPrefixMappingDB,
+) (id int64, err error) {
+	const sqlQuery = `
+		INSERT INTO cleanup_policy_prefix_mappings (
+			cpp_cleanup_policy_id
+			,cpp_prefix
+			,cpp_prefix_type
+		) values (
+			:cpp_cleanup_policy_id
+			,:cpp_prefix
+			,:cpp_prefix_type
+		) RETURNING cpp_id`
+
+	db := dbtx.GetAccessor(ctx, c.db)
+
+	// insert the prefix mapping and return its id
+	query, arg, err := db.BindNamed(sqlQuery, mapping)
+	if err != nil {
+		return 0, databaseg.ProcessSQLErrorf(
+			ctx, err,
+			"Failed to bind cleanup policy prefix mapping object",
+		)
+	}
+
+	if err = db.QueryRowContext(ctx, query, arg...).Scan(&mapping.PrefixID); err != nil {
+		return 0, databaseg.ProcessSQLErrorf(ctx, err, "Insert query failed")
+	}
+
+	return mapping.PrefixID, nil
+}
+
+// Delete deletes a cleanup policy by id.
+// It doesn't clean up the prefix mappings, as they are removed by the database cascade.
+func (c CleanupPolicyDao) Delete(ctx context.Context, id int64) (err error) {
+	stmt := databaseg.Builder.Delete("cleanup_policies").Where("cp_id = ?", id)
+	query, args, err := stmt.ToSql()
+	if err != nil {
+		return err
+	}
+
+	db := dbtx.GetAccessor(ctx, c.db)
+	_, err = db.ExecContext(ctx, query, args...)
+	if err != nil {
+		return databaseg.ProcessSQLErrorf(
+			ctx, err,
+			"failed to delete cleanup policy %d", id,
+		)
+	}
+	return nil
+}
+
+func (c CleanupPolicyDao) deleteCleanupPolicies(ctx context.Context, ids []int64) error {
+	// sqlx.In expands the single IN (?) into one bindvar per id; Rebind then
+	// rewrites the bindvars for the active driver (e.g. $1..$n on Postgres).
+	query, args, err := sqlx.In("DELETE FROM cleanup_policies WHERE cp_id IN (?)", ids)
+	if err != nil {
+		return err
+	}
+
+	query = c.db.Rebind(query)
+	db := dbtx.GetAccessor(ctx, c.db)
+	_, err = db.ExecContext(ctx, query, args...)
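+	// Illustrative only (assuming a Postgres-backed *sqlx.DB): for ids = [1, 2]
+	// the statement executed above is
+	//   DELETE FROM cleanup_policies WHERE cp_id IN ($1, $2)
+	// with args (1, 2).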
+ if err != nil { + return databaseg.ProcessSQLErrorf( + ctx, err, + "failed to delete cleanup policies %v", ids, + ) + } + return nil +} + +func (c CleanupPolicyDao) ModifyCleanupPolicies( + ctx context.Context, + cleanupPolicies *[]types.CleanupPolicy, + ids []int64, +) error { + err := c.tx.WithTx( + ctx, func(ctx context.Context) error { + if len(ids) > 0 { + err := c.deleteCleanupPolicies(ctx, ids) + if err != nil { + return err + } + } + + if !commons.IsEmpty(cleanupPolicies) { + for _, cp := range *cleanupPolicies { + cpCopy := cp // Create a copy of cp to avoid implicit memory aliasing + id, err := c.Create(ctx, &cpCopy) + if err != nil { + return err + } + + cp.ID = id + err2 := c.createPrefixMappingsInternal(ctx, cp) + if err2 != nil { + return err2 + } + } + } + return nil + }, + ) + return err +} + +func (c CleanupPolicyDao) createPrefixMappingsInternal( + ctx context.Context, + cp types.CleanupPolicy, +) error { + mappings := c.mapToInternalCleanupPolicyMapping(&cp) + for _, m := range *mappings { + _, err := c.createPrefixMapping(ctx, m) + if err != nil { + return err + } + } + return nil +} + +func (c CleanupPolicyDao) mapToInternalCleanupPolicyMapping( + cp *types.CleanupPolicy, +) *[]CleanupPolicyPrefixMappingDB { + result := make([]CleanupPolicyPrefixMappingDB, 0) + if !commons.IsEmpty(cp.PackagePrefix) { + for _, prefix := range cp.PackagePrefix { + result = append( + result, CleanupPolicyPrefixMappingDB{ + CleanupPolicyID: cp.ID, + Prefix: prefix, + PrefixType: enum.PrefixTypePackage, + }, + ) + } + } + if !commons.IsEmpty(cp.VersionPrefix) { + for _, prefix := range cp.VersionPrefix { + result = append( + result, CleanupPolicyPrefixMappingDB{ + CleanupPolicyID: cp.ID, + Prefix: prefix, + PrefixType: enum.PrefixTypeVersion, + }, + ) + } + } + return &result +} + +func (c CleanupPolicyDao) mapToInternalCleanupPolicy( + ctx context.Context, + cp *types.CleanupPolicy, +) *CleanupPolicyDB { + if cp.CreatedAt.IsZero() { + cp.CreatedAt = time.Now() + } + + cp.UpdatedAt = time.Now() + + session, _ := request.AuthSessionFrom(ctx) + if cp.CreatedBy == 0 { + cp.CreatedBy = session.Principal.ID + } + cp.UpdatedBy = session.Principal.ID + + return &CleanupPolicyDB{ + ID: cp.ID, + RegistryID: cp.RegistryID, + Name: cp.Name, + ExpiryTimeInMs: cp.ExpiryTime, + CreatedAt: cp.CreatedAt.UnixMilli(), + UpdatedAt: cp.UpdatedAt.UnixMilli(), + CreatedBy: cp.CreatedBy, + UpdatedBy: cp.UpdatedBy, + } +} + +func (c CleanupPolicyDao) mapToCleanupPolicies( + _ context.Context, + rows *sqlx.Rows, +) (*[]types.CleanupPolicy, error) { + cleanupPolicies := make(map[int64]*types.CleanupPolicy) + + for rows.Next() { + var cp CleanupPolicyJoinMapping + if err := rows.StructScan(&cp); err != nil { + return nil, + errors.Wrap(err, "failed to scan cleanup policy") + } + + if _, exists := cleanupPolicies[cp.ID]; !exists { + cleanupPolicies[cp.ID] = &types.CleanupPolicy{ + ID: cp.ID, + RegistryID: cp.RegistryID, + Name: cp.Name, + ExpiryTime: cp.ExpiryTimeInMs, + CreatedAt: time.UnixMilli(cp.CreatedAt), + UpdatedAt: time.UnixMilli(cp.UpdatedAt), + PackagePrefix: make([]string, 0), + VersionPrefix: make([]string, 0), + } + } + + if cp.PrefixType == enum.PrefixTypePackage { + cleanupPolicies[cp.ID].PackagePrefix = append(cleanupPolicies[cp.ID].PackagePrefix, cp.Prefix) + } + + if cp.PrefixType == enum.PrefixTypeVersion { + cleanupPolicies[cp.ID].VersionPrefix = append(cleanupPolicies[cp.ID].VersionPrefix, cp.Prefix) + } + } + var result []types.CleanupPolicy + for _, cp := range cleanupPolicies { + result = 
append(result, *cp)
+	}
+	return &result, nil
+}
diff --git a/registry/app/store/database/layer.go b/registry/app/store/database/layer.go
new file mode 100644
index 000000000..f483cbdb0
--- /dev/null
+++ b/registry/app/store/database/layer.go
@@ -0,0 +1,141 @@
+// Copyright 2023 Harness, Inc.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package database
+
+import (
+	"context"
+	"errors"
+	"fmt"
+	"time"
+
+	"github.com/harness/gitness/app/api/request"
+	"github.com/harness/gitness/registry/app/store"
+	"github.com/harness/gitness/registry/app/store/database/util"
+	"github.com/harness/gitness/registry/types"
+	store2 "github.com/harness/gitness/store"
+	"github.com/harness/gitness/store/database"
+	"github.com/harness/gitness/store/database/dbtx"
+
+	"github.com/jmoiron/sqlx"
+)
+
+type layersDao struct {
+	db           *sqlx.DB
+	mtRepository store.MediaTypesRepository
+}
+
+func NewLayersDao(db *sqlx.DB, mtRepository store.MediaTypesRepository) store.LayerRepository {
+	return &layersDao{
+		db:           db,
+		mtRepository: mtRepository,
+	}
+}
+
+type layersDB struct {
+	ID          int64 `db:"layer_id"`
+	RegistryID  int64 `db:"layer_registry_id"`
+	ManifestID  int64 `db:"layer_manifest_id"`
+	MediaTypeID int64 `db:"layer_media_type_id"`
+	BlobID      int64 `db:"layer_blob_id"`
+	Size        int64 `db:"layer_size"`
+	CreatedAt   int64 `db:"layer_created_at"`
+	UpdatedAt   int64 `db:"layer_updated_at"`
+	CreatedBy   int64 `db:"layer_created_by"`
+	UpdatedBy   int64 `db:"layer_updated_by"`
+}
+
+func (l layersDao) AssociateLayerBlob(ctx context.Context, m *types.Manifest,
+	b *types.Blob) error {
+	const sqlQuery = `
+		INSERT INTO layers (
+			layer_registry_id
+			,layer_manifest_id
+			,layer_media_type_id
+			,layer_blob_id
+			,layer_size
+			,layer_created_at
+			,layer_updated_at
+			,layer_created_by
+			,layer_updated_by
+		) VALUES (
+			:layer_registry_id
+			,:layer_manifest_id
+			,:layer_media_type_id
+			,:layer_blob_id
+			,:layer_size
+			,:layer_created_at
+			,:layer_updated_at
+			,:layer_created_by
+			,:layer_updated_by
+		) ON CONFLICT (layer_registry_id, layer_manifest_id, layer_blob_id)
+			DO NOTHING
+			RETURNING layer_id`
+
+	mediaTypeID, err := l.mtRepository.MapMediaType(ctx, b.MediaType)
+	if err != nil {
+		return err
+	}
+
+	layer := &types.Layer{
+		RegistryID:  m.RegistryID,
+		ManifestID:  m.ID,
+		MediaTypeID: mediaTypeID,
+		BlobID:      b.ID,
+		Size:        b.Size,
+	}
+
+	db := dbtx.GetAccessor(ctx, l.db)
+	query, arg, err := db.BindNamed(sqlQuery, l.mapToInternalLayer(ctx, layer))
+	if err != nil {
+		return database.ProcessSQLErrorf(ctx, err, "Bind query failed")
+	}
+
+	if err = db.QueryRowContext(ctx, query, arg...).Scan(&layer.ID); err != nil {
+		err = database.ProcessSQLErrorf(ctx, err, "QueryRowContext failed")
+		if errors.Is(err, store2.ErrDuplicate) {
+			return nil
+		}
+		// ON CONFLICT DO NOTHING returns no row when the association already
+		// exists; treat that as success so the call stays idempotent.
+		if errors.Is(err, store2.ErrResourceNotFound) {
+			return nil
+		}
+		if errors.Is(err, store2.ErrForeignKeyViolation) {
+			return util.ErrRefManifestNotFound
+		}
+		return fmt.Errorf("failed to associate layer blob: %w", err)
+	}
+	return nil
+}
+
+func (l layersDao) mapToInternalLayer(ctx context.Context, in *types.Layer) *layersDB {
+	if in.CreatedAt.IsZero() {
+		in.CreatedAt = time.Now()
+	}
+	in.UpdatedAt = time.Now()
+	session, _ := request.AuthSessionFrom(ctx)
+	if in.CreatedBy == 0 {
+		in.CreatedBy = session.Principal.ID
+	}
+	in.UpdatedBy = session.Principal.ID
+
+	return &layersDB{
+		ID:          in.ID,
+		RegistryID:  in.RegistryID,
+		ManifestID:  in.ManifestID,
+		MediaTypeID: in.MediaTypeID,
+		BlobID:      in.BlobID,
+		Size:        in.Size,
+		// Stored as epoch milliseconds, consistent with the other registry tables.
+		CreatedAt: in.CreatedAt.UnixMilli(),
+		UpdatedAt: in.UpdatedAt.UnixMilli(),
+		CreatedBy: in.CreatedBy,
+		UpdatedBy: in.UpdatedBy,
+	}
+}
diff --git a/registry/app/store/database/manifest.go b/registry/app/store/database/manifest.go
new file mode 100644
index 000000000..d74d4f569
--- /dev/null
+++ b/registry/app/store/database/manifest.go
@@ -0,0 +1,774 @@
+// Copyright 2023 Harness, Inc.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package database
+
+import (
+	"context"
+	"database/sql"
+	"encoding/json"
+	"errors"
+	"fmt"
+	"time"
+
+	"github.com/harness/gitness/app/api/request"
+	"github.com/harness/gitness/registry/app/store"
+	"github.com/harness/gitness/registry/app/store/database/util"
+	"github.com/harness/gitness/registry/types"
+	store2 "github.com/harness/gitness/store"
+	"github.com/harness/gitness/store/database"
+	"github.com/harness/gitness/store/database/dbtx"
+
+	"github.com/jmoiron/sqlx"
+	"github.com/opencontainers/go-digest"
+	errors2 "github.com/pkg/errors"
+)
+
+type manifestDao struct {
+	sqlDB        *sqlx.DB
+	mtRepository store.MediaTypesRepository
+}
+
+func NewManifestDao(sqlDB *sqlx.DB, mtRepository store.MediaTypesRepository) store.ManifestRepository {
+	return &manifestDao{
+		sqlDB:        sqlDB,
+		mtRepository: mtRepository,
+	}
+}
+
+var (
+	PrimaryInsertQuery = `
+		INSERT INTO manifests (
+			manifest_registry_id,
+			manifest_schema_version,
+
manifest_media_type_id, + manifest_artifact_media_type, + manifest_total_size, + manifest_configuration_media_type, + manifest_configuration_payload, + manifest_configuration_blob_id, + manifest_configuration_digest, + manifest_digest, + manifest_payload, + manifest_non_conformant, + manifest_non_distributable_layers, + manifest_subject_id, + manifest_subject_digest, + manifest_annotations, + manifest_image_name, + manifest_created_at, + manifest_created_by, + manifest_updated_at, + manifest_updated_by + ) VALUES ( + :manifest_registry_id, + :manifest_schema_version, + :manifest_media_type_id, + :manifest_artifact_media_type, + :manifest_total_size, + :manifest_configuration_media_type, + :manifest_configuration_payload, + :manifest_configuration_blob_id, + :manifest_configuration_digest, + :manifest_digest, + :manifest_payload, + :manifest_non_conformant, + :manifest_non_distributable_layers, + :manifest_subject_id, + :manifest_subject_digest, + :manifest_annotations, + :manifest_image_name, + :manifest_created_at, + :manifest_created_by, + :manifest_updated_at, + :manifest_updated_by + ) ON CONFLICT (manifest_registry_id, manifest_image_name, manifest_digest) DO NOTHING + RETURNING manifest_id` + + ReadQuery = database.Builder.Select( + "manifest_id", "manifest_registry_id", + "manifest_total_size", "manifest_schema_version", + "manifest_media_type_id", "mt_media_type", "manifest_artifact_media_type", + "manifest_digest", "manifest_payload", + "manifest_configuration_blob_id", "manifest_configuration_media_type", + "manifest_configuration_digest", + "manifest_configuration_payload", "manifest_non_conformant", + "manifest_non_distributable_layers", "manifest_subject_id", + "manifest_subject_digest", "manifest_annotations", "manifest_created_at", + "manifest_created_by", "manifest_updated_at", "manifest_updated_by", "manifest_image_name", + ). + From("manifests"). + Join("media_types ON mt_id = manifest_media_type_id") +) + +// Manifest holds the record of a manifest in DB. +type manifestDB struct { + ID int64 `db:"manifest_id"` + RegistryID int64 `db:"manifest_registry_id"` + TotalSize int64 `db:"manifest_total_size"` + SchemaVersion int `db:"manifest_schema_version"` + MediaTypeID int64 `db:"manifest_media_type_id"` + ImageName string `db:"manifest_image_name"` + ArtifactMediaType sql.NullString `db:"manifest_artifact_media_type"` + Digest []byte `db:"manifest_digest"` + Payload []byte `db:"manifest_payload"` + ConfigurationMediaType string `db:"manifest_configuration_media_type"` + ConfigurationPayload []byte `db:"manifest_configuration_payload"` + ConfigurationDigest []byte `db:"manifest_configuration_digest"` + ConfigurationBlobID sql.NullInt64 `db:"manifest_configuration_blob_id"` + SubjectID sql.NullInt64 `db:"manifest_subject_id"` + SubjectDigest []byte `db:"manifest_subject_digest"` + NonConformant bool `db:"manifest_non_conformant"` + // NonDistributableLayers identifies whether a manifest + // references foreign/non-distributable layers. For now, we are + // not registering metadata about these layers, + // but we may wish to backfill that metadata in the future by parsing + // the manifest payload. 
+ NonDistributableLayers bool `db:"manifest_non_distributable_layers"` + Annotations []byte `db:"manifest_annotations"` + CreatedAt int64 `db:"manifest_created_at"` + CreatedBy int64 `db:"manifest_created_by"` + UpdatedAt int64 `db:"manifest_updated_at"` + UpdatedBy int64 `db:"manifest_updated_by"` +} + +type manifestMetadataDB struct { + manifestDB + MediaType string `db:"mt_media_type"` +} + +// FindAll finds all manifests. +func (dao manifestDao) FindAll(_ context.Context) ( + types.Manifests, error, +) { + // TODO implement me + panic("implement me") +} + +func (dao manifestDao) Count(_ context.Context) (int, error) { + // TODO implement me + panic("implement me") +} + +func (dao manifestDao) LayerBlobs( + _ context.Context, + _ *types.Manifest, +) (types.Blobs, error) { + // TODO implement me + panic("implement me") +} + +// References finds all manifests directly referenced by a manifest (if any). +func (dao manifestDao) References( + ctx context.Context, + m *types.Manifest, +) (types.Manifests, error) { + stmt := ReadQuery.Join("manifest_references ON manifest_ref_child_id = manifest_id"). + LeftJoin("blobs ON manifest_configuration_blob_id = blob_id"). + Where("manifest_ref_registry_id = ?", m.RegistryID).Where("manifest_ref_parent_id = ?", m.ID) + + db := dbtx.GetAccessor(ctx, dao.sqlDB) + dst := []*manifestMetadataDB{} + + toSQL, args, err := stmt.ToSql() + if err != nil { + return nil, errors2.Wrap(err, "Failed to convert query to sql") + } + + if err = db.SelectContext(ctx, &dst, toSQL, args...); err != nil { + err := database.ProcessSQLErrorf(ctx, err, "Failed to find manifests during references") + return nil, err + } + + result, err := dao.mapToManifests(dst) + if err != nil { + return nil, fmt.Errorf("finding referenced manifests: %w", err) + } + return *result, err +} + +func (dao manifestDao) Create(ctx context.Context, m *types.Manifest) error { + mediaTypeID, err := dao.mtRepository.MapMediaType(ctx, m.MediaType) + if err != nil { + return fmt.Errorf("mapping manifest media type: %w", err) + } + m.MediaTypeID = mediaTypeID + + db := dbtx.GetAccessor(ctx, dao.sqlDB) + manifest, err := mapToInternalManifest(ctx, m) + if err != nil { + return err + } + + query, arg, err := db.BindNamed(PrimaryInsertQuery, manifest) + if err != nil { + return database.ProcessSQLErrorf(ctx, err, "Failed to bind manifest object") + } + + if err = db.QueryRowContext(ctx, query, arg...).Scan(&manifest.ID); err != nil { + err := database.ProcessSQLErrorf(ctx, err, "Insert query failed") + if !errors.Is(err, store2.ErrResourceNotFound) { + return err + } + } + m.ID = manifest.ID + return nil +} + +func (dao manifestDao) CreateOrFind(ctx context.Context, m *types.Manifest) error { + dgst, err := types.NewDigest(m.Digest) + if err != nil { + return err + } + + mediaTypeID, err := dao.mtRepository.MapMediaType(ctx, m.MediaType) + if err != nil { + return fmt.Errorf("mapping manifest media type: %w", err) + } + m.MediaTypeID = mediaTypeID + + db := dbtx.GetAccessor(ctx, dao.sqlDB) + manifest, err := mapToInternalManifest(ctx, m) + if err != nil { + return err + } + + query, arg, err := db.BindNamed(InsertQueryWithConflictHandling, manifest) + if err != nil { + return database.ProcessSQLErrorf(ctx, err, "Failed to bind manifest object") + } + + if err = db.QueryRowContext(ctx, query, arg...).Scan(&manifest.ID); err != nil { + err := database.ProcessSQLErrorf(ctx, err, "Insert query failed") + if !errors.Is(err, store2.ErrResourceNotFound) { + return err + } + result, err := 
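+		// No row means ON CONFLICT DO NOTHING suppressed the insert: the
+		// manifest already exists, so it is looked up by digest instead.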
dao.FindManifestByDigest(ctx, m.RegistryID, m.ImageName, dgst) + if err != nil { + return err + } + m.ID = result.ID + return nil + } + + m.ID = manifest.ID + return nil +} + +func (dao manifestDao) AssociateLayerBlob( + _ context.Context, + _ *types.Manifest, + _ *types.Blob, +) error { + // TODO implement me + panic("implement me") +} + +func (dao manifestDao) DissociateLayerBlob( + _ context.Context, + _ *types.Manifest, + _ *types.Blob, +) error { + // TODO implement me + panic("implement me") +} + +func (dao manifestDao) Delete(ctx context.Context, registryID, id int64) error { + _, err := dao.FindManifestByID(ctx, registryID, id) + if err != nil { + if errors.Is(err, store2.ErrResourceNotFound) { + return nil + } + return fmt.Errorf("failed to get the manifest: %w", err) + } + + stmt := database.Builder.Delete("manifests"). + Where("manifest_registry_id = ? AND manifest_id = ?", registryID, id) + + toSQL, args, err := stmt.ToSql() + if err != nil { + return fmt.Errorf("failed to convert manifest query to sql: %w", err) + } + + db := dbtx.GetAccessor(ctx, dao.sqlDB) + + _, err = db.ExecContext(ctx, toSQL, args...) + if err != nil { + return database.ProcessSQLErrorf(ctx, err, "the delete query failed") + } + + return nil +} + +func (dao manifestDao) DeleteManifest( + ctx context.Context, repoID int64, + imageName string, d digest.Digest, +) (bool, error) { + digestBytes, err := types.GetDigestBytes(d) + if err != nil { + return false, err + } + stmt := database.Builder.Delete("manifests"). + Where( + "manifest_registry_id = ? AND manifest_image_name = ? AND manifest_digest = ?", + repoID, imageName, digestBytes, + ) + + toSQL, args, err := stmt.ToSql() + if err != nil { + return false, fmt.Errorf("failed to convert manifest query to sql: %w", err) + } + + db := dbtx.GetAccessor(ctx, dao.sqlDB) + + r, err := db.ExecContext(ctx, toSQL, args...) + if err != nil { + return false, database.ProcessSQLErrorf(ctx, err, "the delete query failed") + } + + count, _ := r.RowsAffected() + return count == 1, nil +} + +func (dao manifestDao) FindManifestByID( + ctx context.Context, + registryID, + id int64, +) (*types.Manifest, error) { + stmt := database.Builder.Select("manifest_digest").From("manifests"). + Where("manifest_id = ?", id).Where("manifest_registry_id = ?", registryID) + + toSQL, args, err := stmt.ToSql() + if err != nil { + return nil, fmt.Errorf("failed to convert find manifest query to sql: %w", err) + } + + dst := new(manifestMetadataDB) + db := dbtx.GetAccessor(ctx, dao.sqlDB) + + if err = db.GetContext(ctx, dst, toSQL, args...); err != nil { + err := database.ProcessSQLErrorf(ctx, err, "Failed to find manifest") + return nil, err + } + + return dao.mapToManifest(dst) +} + +func (dao manifestDao) FindManifestByDigest( + ctx context.Context, repoID int64, + imageName string, digest types.Digest, +) (*types.Manifest, error) { + digestBytes, err := util.GetHexDecodedBytes(string(digest)) + if err != nil { + return nil, err + } + + stmt := ReadQuery. + LeftJoin("blobs ON manifest_configuration_blob_id = blob_id"). + Where( + "manifest_registry_id = ? AND manifest_image_name = ? 
AND manifest_digest = ?", + repoID, imageName, digestBytes, + ) + + toSQL, args, err := stmt.ToSql() + if err != nil { + return nil, fmt.Errorf("failed to convert manifest query to sql: %w", err) + } + + dst := new(manifestMetadataDB) + db := dbtx.GetAccessor(ctx, dao.sqlDB) + + if err = db.GetContext(ctx, dst, toSQL, args...); err != nil { + err := database.ProcessSQLErrorf(ctx, err, "Failed to find manifest") + return nil, err + } + + return dao.mapToManifest(dst) +} + +func (dao manifestDao) ListManifestsBySubjectDigest( + ctx context.Context, repoID int64, + digest types.Digest, +) (types.Manifests, error) { + digestBytes, err := util.GetHexDecodedBytes(string(digest)) + if err != nil { + return nil, err + } + + stmt := ReadQuery. + LeftJoin("blobs ON manifest_configuration_blob_id = blob_id"). + Where( + "manifest_registry_id = ? AND manifest_subject_digest = ?", + repoID, digestBytes, + ) + + toSQL, args, err := stmt.ToSql() + if err != nil { + return nil, fmt.Errorf("failed to convert manifest query to sql: %w", err) + } + + dst := []*manifestMetadataDB{} + db := dbtx.GetAccessor(ctx, dao.sqlDB) + + if err = db.SelectContext(ctx, &dst, toSQL, args...); err != nil { + err := database.ProcessSQLErrorf(ctx, err, "Failed to list manifests") + return nil, err + } + + result, err := dao.mapToManifests(dst) + if err != nil { + return nil, fmt.Errorf("finding manifests by subject digest: %w", err) + } + return *result, err +} + +// FindManifestByTagName finds a manifest by tag name within a repository. +func (dao manifestDao) FindManifestByTagName( + ctx context.Context, repoID int64, + imageName string, tag string, +) (*types.Manifest, error) { + stmt := ReadQuery. + Join("tags t ON t.tag_registry_id = manifest_registry_id AND t.tag_manifest_id = manifest_id"). + LeftJoin("blobs ON manifest_configuration_blob_id = blob_id"). + Where( + "manifest_registry_id = ? AND manifest_image_name = ? AND t.tag_name = ?", + repoID, imageName, tag, + ) + + toSQL, args, err := stmt.ToSql() + if err != nil { + return nil, fmt.Errorf("failed to convert manifest query to sql: %w", err) + } + + dst := new(manifestMetadataDB) + db := dbtx.GetAccessor(ctx, dao.sqlDB) + + if err = db.GetContext(ctx, dst, toSQL, args...); err != nil { + err := database.ProcessSQLErrorf(ctx, err, "Failed to find manifest") + return nil, err + } + + return dao.mapToManifest(dst) +} + +func (dao manifestDao) GetManifestPayload( + ctx context.Context, + parentID int64, + repoKey string, + imageName string, + digest types.Digest, +) (*types.Payload, error) { + digestBytes, err := util.GetHexDecodedBytes(string(digest)) + if err != nil { + return nil, err + } + + stmt := ReadQuery.Join("registries r ON r.registry_id = manifest_registry_id"). + Where( + "r.registry_parent_id = ? AND r.registry_name = ? AND "+ + "manifest_image_name = ? 
AND manifest_digest = ?", + parentID, repoKey, imageName, digestBytes, + ) + + toSQL, args, err := stmt.ToSql() + if err != nil { + return nil, fmt.Errorf("failed to convert manifest query to sql: %w", err) + } + + dst := new(manifestMetadataDB) + db := dbtx.GetAccessor(ctx, dao.sqlDB) + + if err = db.GetContext(ctx, dst, toSQL, args...); err != nil { + err := database.ProcessSQLErrorf(ctx, err, "Failed to find manifest payload") + return nil, err + } + + m, err := dao.mapToManifest(dst) + if err != nil { + return nil, err + } + return &m.Payload, nil +} + +func (dao manifestDao) FindManifestPayloadByTagName( + ctx context.Context, + parentID int64, + repoKey string, + imageName string, + version string, +) (*types.Payload, error) { + stmt := ReadQuery.Join("registries r ON r.registry_id = manifest_registry_id"). + Join("tags t ON t.tag_manifest_id = manifest_id"). + Where( + "r.registry_parent_id = ? AND r.registry_name = ?"+ + " AND manifest_image_name = ? AND t.tag_name = ?", + parentID, repoKey, imageName, version, + ) + + toSQL, args, err := stmt.ToSql() + if err != nil { + return nil, fmt.Errorf("failed to convert manifest query to sql: %w", err) + } + + dst := new(manifestMetadataDB) + db := dbtx.GetAccessor(ctx, dao.sqlDB) + + if err = db.GetContext(ctx, dst, toSQL, args...); err != nil { + err := database.ProcessSQLErrorf(ctx, err, "Failed to find manifest") + return nil, err + } + + m, err := dao.mapToManifest(dst) + if err != nil { + return nil, err + } + return &m.Payload, nil +} + +func (dao manifestDao) Get(ctx context.Context, manifestID int64) (*types.Manifest, error) { + stmt := ReadQuery. + LeftJoin("blobs ON manifest_configuration_blob_id = blob_id"). + Where("manifest_id = ?", manifestID) + + toSQL, args, err := stmt.ToSql() + if err != nil { + return nil, fmt.Errorf("failed to convert manifest query to sql: %w", err) + } + + dst := new(manifestMetadataDB) + db := dbtx.GetAccessor(ctx, dao.sqlDB) + + if err = db.GetContext(ctx, dst, toSQL, args...); err != nil { + err := database.ProcessSQLErrorf(ctx, err, "Failed to find manifest") + return nil, err + } + + return dao.mapToManifest(dst) +} + +func (dao manifestDao) ListManifestsBySubject( + ctx context.Context, + repoID int64, id int64, +) (types.Manifests, error) { + stmt := ReadQuery. + LeftJoin("blobs ON manifest_configuration_blob_id = blob_id"). + Where("manifest_registry_id = ? 
AND manifest_subject_id = ?", repoID, id)
+
+	toSQL, args, err := stmt.ToSql()
+	if err != nil {
+		return nil, fmt.Errorf("failed to convert manifest query to sql: %w", err)
+	}
+
+	dst := []*manifestMetadataDB{}
+	db := dbtx.GetAccessor(ctx, dao.sqlDB)
+
+	if err = db.SelectContext(ctx, &dst, toSQL, args...); err != nil {
+		err := database.ProcessSQLErrorf(ctx, err, "Failed to list manifests")
+		return nil, err
+	}
+
+	result, err := dao.mapToManifests(dst)
+	if err != nil {
+		return nil, err
+	}
+
+	return *result, nil
+}
+
+func mapToInternalManifest(ctx context.Context, in *types.Manifest) (*manifestDB, error) {
+	if in.CreatedAt.IsZero() {
+		in.CreatedAt = time.Now()
+	}
+	in.UpdatedAt = time.Now()
+
+	session, _ := request.AuthSessionFrom(ctx)
+	if in.CreatedBy == 0 {
+		in.CreatedBy = session.Principal.ID
+	}
+	in.UpdatedBy = session.Principal.ID
+
+	digestBytes, err := types.GetDigestBytes(in.Digest)
+	if err != nil {
+		return nil, err
+	}
+
+	var configBlobID sql.NullInt64
+	var configPayload types.Payload
+	var configMediaType string
+	var cfgDigestBytes []byte
+	if in.Configuration != nil {
+		configPayload = in.Configuration.Payload
+		configMediaType = in.Configuration.MediaType
+		configBlobID = sql.NullInt64{Int64: in.Configuration.BlobID, Valid: true}
+
+		cfgDigestBytes, err = types.GetDigestBytes(in.Configuration.Digest)
+		if err != nil {
+			return nil, err
+		}
+	}
+
+	sbjDigestBytes, err := types.GetDigestBytes(in.SubjectDigest)
+	if err != nil {
+		return nil, err
+	}
+
+	annot, err := json.Marshal(in.Annotations)
+	if err != nil {
+		return nil, err
+	}
+
+	return &manifestDB{
+		ID:                     in.ID,
+		RegistryID:             in.RegistryID,
+		TotalSize:              in.TotalSize,
+		SchemaVersion:          in.SchemaVersion,
+		MediaTypeID:            in.MediaTypeID,
+		ArtifactMediaType:      in.ArtifactType,
+		Digest:                 digestBytes,
+		Payload:                in.Payload,
+		ConfigurationBlobID:    configBlobID,
+		ConfigurationMediaType: configMediaType,
+		ConfigurationPayload:   configPayload,
+		ConfigurationDigest:    cfgDigestBytes,
+		NonConformant:          in.NonConformant,
+		NonDistributableLayers: in.NonDistributableLayers,
+		SubjectID:              in.SubjectID,
+		SubjectDigest:          sbjDigestBytes,
+		Annotations:            annot,
+		ImageName:              in.ImageName,
+		CreatedAt:              in.CreatedAt.UnixMilli(),
+		CreatedBy:              in.CreatedBy,
+		UpdatedAt:              in.UpdatedAt.UnixMilli(),
+		UpdatedBy:              in.UpdatedBy,
+	}, nil
+}
+
+func (dao manifestDao) mapToManifest(dst *manifestMetadataDB) (*types.Manifest, error) {
+	// Converting []byte digest into Digest
+	dgst := types.Digest(util.GetHexEncodedString(dst.Digest))
+	parsedDigest, err := dgst.Parse()
+	if err != nil {
+		return nil, err
+	}
+
+	// Converting Configuration []byte digest into Digest
+	cfgDigest := types.Digest(util.GetHexEncodedString(dst.ConfigurationDigest))
+	cfgParsedDigest, err := cfgDigest.Parse()
+	if err != nil {
+		return nil, err
+	}
+
+	// Converting Subject []byte digest into Digest
+	sbjDigest := types.Digest(util.GetHexEncodedString(dst.SubjectDigest))
+	sbjParsedDigest, err := sbjDigest.Parse()
+	if err != nil {
+		return nil, err
+	}
+
+	var annot map[string]string
+	err = json.Unmarshal(dst.Annotations, &annot)
+	if err != nil {
+		return nil, err
+	}
+
+	m := &types.Manifest{
+		ID:                     dst.ID,
+		RegistryID:             dst.RegistryID,
+		TotalSize:              dst.TotalSize,
+		SchemaVersion:          dst.SchemaVersion,
+		MediaTypeID:            dst.MediaTypeID,
+		MediaType:              dst.MediaType,
+		ArtifactType:           dst.ArtifactMediaType,
+		Digest:                 parsedDigest,
+		Payload:                dst.Payload,
+		NonConformant:          dst.NonConformant,
+		NonDistributableLayers: dst.NonDistributableLayers,
+		SubjectID:              dst.SubjectID,
+		SubjectDigest:          sbjParsedDigest,
+
Annotations: annot, + ImageName: dst.ImageName, + CreatedAt: time.UnixMilli(dst.CreatedAt), + } + + if dst.ConfigurationBlobID.Valid { + m.Configuration = &types.Configuration{ + BlobID: dst.ConfigurationBlobID.Int64, + MediaType: dst.ConfigurationMediaType, + Digest: cfgParsedDigest, + Payload: dst.ConfigurationPayload, + } + } + + return m, nil +} + +func (dao manifestDao) mapToManifests(dst []*manifestMetadataDB) (*types.Manifests, error) { + mm := make(types.Manifests, 0, len(dst)) + + for _, d := range dst { + m, err := dao.mapToManifest(d) + if err != nil { + return nil, err + } + mm = append(mm, m) + } + + return &mm, nil +} diff --git a/registry/app/store/database/manifest_reference.go b/registry/app/store/database/manifest_reference.go new file mode 100644 index 000000000..1cb898214 --- /dev/null +++ b/registry/app/store/database/manifest_reference.go @@ -0,0 +1,138 @@ +// Copyright 2023 Harness, Inc. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package database + +import ( + "context" + "errors" + "fmt" + "time" + + "github.com/harness/gitness/app/api/request" + "github.com/harness/gitness/registry/app/store" + "github.com/harness/gitness/registry/app/store/database/util" + "github.com/harness/gitness/registry/types" + store2 "github.com/harness/gitness/store" + databaseg "github.com/harness/gitness/store/database" + "github.com/harness/gitness/store/database/dbtx" + + "github.com/jmoiron/sqlx" +) + +type manifestReferenceDao struct { + db *sqlx.DB +} + +func NewManifestReferenceDao(db *sqlx.DB) store.ManifestReferenceRepository { + return &manifestReferenceDao{ + db: db, + } +} + +type manifestReferenceDB struct { + ID int64 `db:"manifest_ref_id"` + RegistryID int64 `db:"manifest_ref_registry_id"` + ParentID int64 `db:"manifest_ref_parent_id"` + ChildID int64 `db:"manifest_ref_child_id"` + CreatedAt int64 `db:"manifest_ref_created_at"` + UpdatedAt int64 `db:"manifest_ref_updated_at"` + CreatedBy int64 `db:"manifest_ref_created_by"` + UpdatedBy int64 `db:"manifest_ref_updated_by"` +} + +func (dao manifestReferenceDao) AssociateManifest( + ctx context.Context, + ml *types.Manifest, m *types.Manifest, +) error { + if ml.ID == m.ID { + return fmt.Errorf("cannot associate a manifest with itself") + } + const sqlQuery = ` + INSERT INTO manifest_references ( + manifest_ref_registry_id + ,manifest_ref_parent_id + ,manifest_ref_child_id + ,manifest_ref_created_at + ,manifest_ref_updated_at + ,manifest_ref_created_by + ,manifest_ref_updated_by + ) VALUES ( + :manifest_ref_registry_id + ,:manifest_ref_parent_id + ,:manifest_ref_child_id + ,:manifest_ref_created_at + ,:manifest_ref_updated_at + ,:manifest_ref_created_by + ,:manifest_ref_updated_by + ) ON CONFLICT (manifest_ref_registry_id, manifest_ref_parent_id, manifest_ref_child_id) + DO NOTHING + RETURNING manifest_ref_id` + + manifestRef := &types.ManifestReference{ + RegistryID: ml.RegistryID, + ParentID: ml.ID, + ChildID: m.ID, + } + + db := dbtx.GetAccessor(ctx, dao.db) + query, arg, err := db.BindNamed(sqlQuery, 
mapToInternalManifestReference(ctx, manifestRef))
+	if err != nil {
+		return databaseg.ProcessSQLErrorf(ctx, err, "Bind query failed")
+	}
+
+	if err = db.QueryRowContext(ctx, query, arg...).Scan(&manifestRef.ID); err != nil {
+		err = databaseg.ProcessSQLErrorf(ctx, err, "QueryRowContext failed")
+		if errors.Is(err, store2.ErrDuplicate) {
+			return nil
+		}
+		// ON CONFLICT DO NOTHING returns no row when the reference already
+		// exists; treat that as success so the call stays idempotent.
+		if errors.Is(err, store2.ErrResourceNotFound) {
+			return nil
+		}
+		if errors.Is(err, store2.ErrForeignKeyViolation) {
+			return util.ErrRefManifestNotFound
+		}
+		return fmt.Errorf("inserting manifest reference: %w", err)
+	}
+	return nil
+}
+
+func (dao manifestReferenceDao) DissociateManifest(
+	_ context.Context,
+	_ *types.Manifest,
+	_ *types.Manifest,
+) error {
+	// TODO implement me
+	panic("implement me")
+}
+
+func mapToInternalManifestReference(ctx context.Context, in *types.ManifestReference) *manifestReferenceDB {
+	if in.CreatedAt.IsZero() {
+		in.CreatedAt = time.Now()
+	}
+	in.UpdatedAt = time.Now()
+	session, _ := request.AuthSessionFrom(ctx)
+	if in.CreatedBy == 0 {
+		in.CreatedBy = session.Principal.ID
+	}
+	in.UpdatedBy = session.Principal.ID
+
+	return &manifestReferenceDB{
+		ID:         in.ID,
+		RegistryID: in.RegistryID,
+		ParentID:   in.ParentID,
+		ChildID:    in.ChildID,
+		CreatedAt:  in.CreatedAt.UnixMilli(),
+		UpdatedAt:  in.UpdatedAt.UnixMilli(),
+		CreatedBy:  in.CreatedBy,
+		UpdatedBy:  in.UpdatedBy,
+	}
+}
diff --git a/registry/app/store/database/mediatype.go b/registry/app/store/database/mediatype.go
new file mode 100644
index 000000000..f0f919847
--- /dev/null
+++ b/registry/app/store/database/mediatype.go
@@ -0,0 +1,73 @@
+// Copyright 2023 Harness, Inc.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package database
+
+import (
+	"context"
+
+	"github.com/harness/gitness/registry/app/store"
+	"github.com/harness/gitness/store/database"
+	"github.com/harness/gitness/store/database/dbtx"
+
+	"github.com/jmoiron/sqlx"
+	errors2 "github.com/pkg/errors"
+)
+
+type mediaTypesDao struct {
+	db *sqlx.DB
+}
+
+func NewMediaTypesDao(db *sqlx.DB) store.MediaTypesRepository {
+	return &mediaTypesDao{
+		db: db,
+	}
+}
+
+func (mt mediaTypesDao) MediaTypeExists(ctx context.Context, mediaType string) (bool, error) {
+	stmt := database.Builder.Select("EXISTS (SELECT 1 FROM media_types WHERE mt_media_type = ?)")
+	sql, args, err := stmt.ToSql()
+	if err != nil {
+		return false, errors2.Wrap(err, "Failed to convert query to sql")
+	}
+	args = append(args, mediaType)
+
+	var exists bool
+	db := dbtx.GetAccessor(ctx, mt.db)
+
+	if err = db.GetContext(ctx, &exists, sql, args...); err != nil {
+		return false, database.ProcessSQLErrorf(ctx, err, "Failed to check if media type exists")
+	}
+
+	return exists, nil
+}
+
+func (mt mediaTypesDao) MapMediaType(ctx context.Context, mediaType string) (int64, error) {
+	stmt := database.Builder.Select("mt_id").
+		From("media_types").
+		Where("mt_media_type = ?", mediaType)
+
+	db := dbtx.GetAccessor(ctx, mt.db)
+	var id int64
+	sql, args, err := stmt.ToSql()
+	if err != nil {
+		return 0, errors2.Wrap(err, "Failed to convert query to sql")
+	}
+
+	if err = db.GetContext(ctx, &id, sql, args...); err != nil {
+		return 0, database.ProcessSQLErrorf(ctx, err, "Failed to find media type")
+	}
+
+	return id, nil
+}
diff --git a/registry/app/store/database/registry.go b/registry/app/store/database/registry.go
new file mode 100644
index 000000000..ab477a7df
--- /dev/null
+++ b/registry/app/store/database/registry.go
@@ -0,0 +1,535 @@
+// Copyright 2023 Harness, Inc.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package database
+
+import (
+	"context"
+	"database/sql"
+	"fmt"
+	"time"
+
+	"github.com/harness/gitness/app/api/request"
+	"github.com/harness/gitness/registry/app/api/openapi/contracts/artifact"
+	"github.com/harness/gitness/registry/app/pkg/commons"
+	"github.com/harness/gitness/registry/app/store"
+	"github.com/harness/gitness/registry/app/store/database/util"
+	"github.com/harness/gitness/registry/types"
+	gitness_store "github.com/harness/gitness/store"
+	databaseg "github.com/harness/gitness/store/database"
+	"github.com/harness/gitness/store/database/dbtx"
+
+	sq "github.com/Masterminds/squirrel"
+	"github.com/jmoiron/sqlx"
+	"github.com/pkg/errors"
+)
+
+type registryDao struct {
+	db *sqlx.DB
+
+	//FIXME: Arvind: Move this to controller layer later
+	mtRepository store.MediaTypesRepository
+}
+
+func NewRegistryDao(db *sqlx.DB, mtRepository store.MediaTypesRepository) store.RegistryRepository {
+	return &registryDao{
+		db: db,
+
+		//FIXME: Arvind: Move this to controller layer later
+		mtRepository: mtRepository,
+	}
+}
+
+// registryDB holds the record of a registry in DB.
+type registryDB struct {
+	ID              int64                 `db:"registry_id"`
+	Name            string                `db:"registry_name"`
+	ParentID        int64                 `db:"registry_parent_id"`
+	RootParentID    int64                 `db:"registry_root_parent_id"`
+	Description     sql.NullString        `db:"registry_description"`
+	Type            artifact.RegistryType `db:"registry_type"`
+	PackageType     artifact.PackageType  `db:"registry_package_type"`
+	UpstreamProxies sql.NullString        `db:"registry_upstream_proxies"`
+	AllowedPattern  sql.NullString        `db:"registry_allowed_pattern"`
+	BlockedPattern  sql.NullString        `db:"registry_blocked_pattern"`
+	Labels          sql.NullString        `db:"registry_labels"`
+	CreatedAt       int64                 `db:"registry_created_at"`
+	UpdatedAt       int64                 `db:"registry_updated_at"`
+	CreatedBy       int64                 `db:"registry_created_by"`
+	UpdatedBy       int64                 `db:"registry_updated_by"`
+}
+
+func (r registryDao) Get(ctx context.Context, id int64) (*types.Registry, error) {
+	stmt := databaseg.Builder.
+		Select(util.ArrToStringByDelimiter(util.GetDBTagsFromStruct(registryDB{}), ",")).
+		From("registries").
+ Where("registry_id = ?", id) + + db := dbtx.GetAccessor(ctx, r.db) + + dst := new(registryDB) + sql, args, err := stmt.ToSql() + if err != nil { + return nil, errors.Wrap(err, "Failed to convert query to sql") + } + + if err = db.GetContext(ctx, dst, sql, args...); err != nil { + return nil, databaseg.ProcessSQLErrorf(ctx, err, "Failed to find repo") + } + + return r.mapToRegistry(ctx, dst) +} + +func (r registryDao) GetByParentIDAndName( + ctx context.Context, parentID int64, + name string, +) (*types.Registry, error) { + stmt := databaseg.Builder. + Select(util.ArrToStringByDelimiter(util.GetDBTagsFromStruct(registryDB{}), ",")). + From("registries"). + Where("registry_parent_id = ? AND registry_name = ?", parentID, name) + + db := dbtx.GetAccessor(ctx, r.db) + + dst := new(registryDB) + sql, args, err := stmt.ToSql() + if err != nil { + return nil, errors.Wrap(err, "Failed to convert query to sql") + } + + if err = db.GetContext(ctx, dst, sql, args...); err != nil { + return nil, databaseg.ProcessSQLErrorf(ctx, err, "Failed to find repo") + } + + return r.mapToRegistry(ctx, dst) +} + +func (r registryDao) GetByRootParentIDAndName( + ctx context.Context, parentID int64, + name string, +) (*types.Registry, error) { + stmt := databaseg.Builder. + Select(util.ArrToStringByDelimiter(util.GetDBTagsFromStruct(registryDB{}), ",")). + From("registries"). + Where("registry_root_parent_id = ? AND registry_name = ?", parentID, name) + + db := dbtx.GetAccessor(ctx, r.db) + + dst := new(registryDB) + sql, args, err := stmt.ToSql() + if err != nil { + return nil, errors.Wrap(err, "Failed to convert query to sql") + } + + if err = db.GetContext(ctx, dst, sql, args...); err != nil { + return nil, databaseg.ProcessSQLErrorf(ctx, err, "Failed to find repo") + } + + return r.mapToRegistry(ctx, dst) +} + +func (r registryDao) FetchUpstreamProxyKeys( + ctx context.Context, + ids []int64, +) (repokeys []string, err error) { + dst := make([]string, 0) + if commons.IsEmpty(ids) { + return dst, nil + } + + stmt := databaseg.Builder. + Select("registry_name"). + From("registries"). + Where(sq.Eq{"registry_id": ids}). + Where("registry_type = ?", artifact.RegistryTypeUPSTREAM) + + db := dbtx.GetAccessor(ctx, r.db) + + sql, args, err := stmt.ToSql() + if err != nil { + return nil, errors.Wrap(err, "Failed to convert query to sql") + } + + if err = db.SelectContext(ctx, &dst, sql, args...); err != nil { + return nil, databaseg.ProcessSQLErrorf(ctx, err, "Failed to find repo") + } + return dst, nil +} + +func (r registryDao) GetByIDIn(ctx context.Context, parentID int64, ids []int64) (*[]types.Registry, error) { + stmt := databaseg.Builder. + Select(util.ArrToStringByDelimiter(util.GetDBTagsFromStruct(registryDB{}), ",")). + From("registries"). + Where("registry_parent_id = ?", parentID). 
+ Where(sq.Eq{"registry_id": ids}) + + db := dbtx.GetAccessor(ctx, r.db) + + dst := []*registryDB{} + sql, args, err := stmt.ToSql() + if err != nil { + return nil, errors.Wrap(err, "Failed to convert query to sql") + } + + if err = db.SelectContext(ctx, &dst, sql, args...); err != nil { + return nil, databaseg.ProcessSQLErrorf(ctx, err, "Failed to find repo") + } + + return r.mapToRegistries(ctx, dst) +} + +type RegistryMetadataDB struct { + RegIdentifier string `db:"reg_identifier"` + Description sql.NullString `db:"description"` + PackageType artifact.PackageType `db:"package_type"` + Type artifact.RegistryType `db:"type"` + LastModified int64 `db:"last_modified"` + URL sql.NullString `db:"url"` + ArtifactCount int64 `db:"artifact_count"` + DownloadCount int64 `db:"download_count"` + Size int64 `db:"size"` + Labels sql.NullString `db:"registry_labels"` +} + +func (r registryDao) GetAll( + ctx context.Context, + parentID int64, + packageTypes []string, + sortByField string, + sortByOrder string, + limit int, + offset int, + search string, + repoType string, +) (repos *[]store.RegistryMetadata, err error) { + q := databaseg.Builder.Select( + "r.registry_name as reg_identifier,"+ + " r.registry_description as description , "+ + "r.registry_package_type as package_type, r.registry_type as type, r.registry_updated_at as last_modified,"+ + " u.upstream_proxy_config_url as url, COALESCE(t2.artifact_count,0) as artifact_count, "+ + "COALESCE(t3.size,0) as size , r.registry_labels, COALESCE(t4.download_count,0) as download_count ", + ). + From("registries r"). + LeftJoin("upstream_proxy_configs u ON r.registry_id = u.upstream_proxy_config_registry_id"). + LeftJoin( + "(SELECT r.registry_id, count(a.artifact_id) as artifact_count FROM"+ + " registries r LEFT JOIN artifacts a ON r.registry_id = a.artifact_registry_id"+ + " WHERE r.registry_parent_id = ? AND a.artifact_enabled = true GROUP BY r.registry_id ) as t2"+ + " ON r.registry_id = t2.registry_id ", parentID, + ). + LeftJoin( + "(SELECT r.registry_id , COALESCE(sum(b.blob_size),0) as size FROM "+ + "registries r LEFT JOIN registry_blobs rb ON r.registry_id = rblob_registry_id "+ + "LEFT JOIN blobs b ON rblob_blob_id = b.blob_id WHERE r.registry_parent_id = ? "+ + "GROUP BY r.registry_id) as t3 ON r.registry_id = t3.registry_id ", parentID, + ). + LeftJoin( + "(SELECT b.artifact_registry_id as registry_id,"+ + " sum(COALESCE(a.artifact_stat_download_count,0)) as"+ + " download_count FROM artifact_stats a "+ + " LEFT JOIN artifacts b"+ + " ON a.artifact_stat_artifact_id = b.artifact_id LEFT JOIN registries"+ + " c ON b.artifact_registry_id = c.registry_id"+ + " WHERE c.registry_parent_id = ? AND b.artifact_enabled = true GROUP BY b.artifact_registry_id)"+ + " as t4 ON r.registry_id = t4.registry_id", parentID, + ). + Where("r.registry_parent_id = ?", parentID) + + if search != "" { + q = q.Where("r.registry_name LIKE ?", "%"+search+"%") + } + + if len(packageTypes) > 0 { + q = q.Where(sq.Eq{"r.registry_package_type": packageTypes}) + } + if repoType != "" { + q = q.Where("r.registry_type = ?", repoType) + } + + if sortByField == "artifact_count" || sortByField == "size" || sortByField == "download_count" { + q = q.OrderBy(sortByField + " " + sortByOrder).Limit(uint64(limit)).Offset(uint64(offset)) + } else { + q = q.OrderBy("r.registry_" + sortByField + " " + sortByOrder). 
+ Limit(uint64(limit)).Offset(uint64(offset)) + } + sql, args, err := q.ToSql() + if err != nil { + return nil, errors.Wrap(err, "Failed to convert query to sql") + } + + db := dbtx.GetAccessor(ctx, r.db) + + dst := []*RegistryMetadataDB{} + if err = db.SelectContext(ctx, &dst, sql, args...); err != nil { + return nil, databaseg.ProcessSQLErrorf(ctx, err, "Failed executing custom list query") + } + + return r.mapToRegistryMetadataList(ctx, dst) +} + +func (r registryDao) CountAll( + ctx context.Context, parentID int64, + packageTypes []string, search string, repoType string, +) (int64, error) { + stmt := databaseg.Builder.Select("COUNT(*)"). + From("registries"). + Where("registry_parent_id = ?", parentID) + + if !commons.IsEmpty(search) { + stmt = stmt.Where("registry_name LIKE ?", "%"+search+"%") + } + + if len(packageTypes) > 0 { + stmt = stmt.Where(sq.Eq{"registry_package_type": packageTypes}) + } + + if repoType != "" { + stmt = stmt.Where("registry_type = ?", repoType) + } + + sql, args, err := stmt.ToSql() + if err != nil { + return -1, errors.Wrap(err, "Failed to convert query to sql") + } + + db := dbtx.GetAccessor(ctx, r.db) + + var count int64 + err = db.QueryRowContext(ctx, sql, args...).Scan(&count) + if err != nil { + return 0, databaseg.ProcessSQLErrorf(ctx, err, "Failed executing count query") + } + return count, nil +} + +func (r registryDao) Create(ctx context.Context, registry *types.Registry) (id int64, err error) { + const sqlQuery = ` + INSERT INTO registries ( + registry_name + ,registry_root_parent_id + ,registry_parent_id + ,registry_description + ,registry_type + ,registry_package_type + ,registry_upstream_proxies + ,registry_allowed_pattern + ,registry_blocked_pattern + ,registry_created_at + ,registry_updated_at + ,registry_created_by + ,registry_updated_by + ,registry_labels + ) VALUES ( + :registry_name + ,:registry_root_parent_id + ,:registry_parent_id + ,:registry_description + ,:registry_type + ,:registry_package_type + ,:registry_upstream_proxies + ,:registry_allowed_pattern + ,:registry_blocked_pattern + ,:registry_created_at + ,:registry_updated_at + ,:registry_created_by + ,:registry_updated_by + ,:registry_labels + ) RETURNING registry_id` + + db := dbtx.GetAccessor(ctx, r.db) + query, arg, err := db.BindNamed(sqlQuery, mapToInternalRegistry(ctx, registry)) + if err != nil { + return -1, databaseg.ProcessSQLErrorf(ctx, err, "Failed to bind repo object") + } + + if err = db.QueryRowContext(ctx, query, arg...).Scan(®istry.ID); err != nil { + return -1, databaseg.ProcessSQLErrorf(ctx, err, "Insert query failed") + } + + return registry.ID, nil +} + +func mapToInternalRegistry(ctx context.Context, in *types.Registry) *registryDB { + session, _ := request.AuthSessionFrom(ctx) + if in.CreatedAt.IsZero() { + in.CreatedAt = time.Now() + } + in.UpdatedAt = time.Now() + if in.CreatedBy == 0 { + in.CreatedBy = session.Principal.ID + } + in.UpdatedBy = session.Principal.ID + + return ®istryDB{ + ID: in.ID, + Name: in.Name, + ParentID: in.ParentID, + RootParentID: in.RootParentID, + Description: getEmptySQLString(in.Description), + Type: in.Type, + PackageType: in.PackageType, + UpstreamProxies: getEmptySQLString(util.Int64ArrToString(in.UpstreamProxies)), + AllowedPattern: getEmptySQLString(util.ArrToString(in.AllowedPattern)), + BlockedPattern: getEmptySQLString(util.ArrToString(in.BlockedPattern)), + Labels: getEmptySQLString(util.ArrToString(in.Labels)), + CreatedAt: in.CreatedAt.UnixMilli(), + UpdatedAt: in.UpdatedAt.UnixMilli(), + CreatedBy: in.CreatedBy, + 
UpdatedBy: in.UpdatedBy, + } +} + +func getEmptySQLString(str string) sql.NullString { + if commons.IsEmpty(str) { + return sql.NullString{String: str, Valid: false} + } + return sql.NullString{String: str, Valid: true} +} + +func (r registryDao) Delete(ctx context.Context, parentID int64, name string) (err error) { + stmt := databaseg.Builder.Delete("registries"). + Where("registry_parent_id = ? AND registry_name = ?", parentID, name) + + sql, args, err := stmt.ToSql() + if err != nil { + return fmt.Errorf("failed to convert purge registry query to sql: %w", err) + } + + db := dbtx.GetAccessor(ctx, r.db) + + _, err = db.ExecContext(ctx, sql, args...) + if err != nil { + return databaseg.ProcessSQLErrorf(ctx, err, "the delete query failed") + } + + return nil +} + +func (r registryDao) Update(ctx context.Context, registry *types.Registry) (err error) { + var sqlQuery = " UPDATE registries SET " + util.GetSetDBKeys(registryDB{}, "registry_id") + + " WHERE registry_id = :registry_id " + + dbRepo := mapToInternalRegistry(ctx, registry) + + // update Version (used for optimistic locking) and Updated time + dbRepo.UpdatedAt = time.Now().UnixMilli() + + db := dbtx.GetAccessor(ctx, r.db) + + query, arg, err := db.BindNamed(sqlQuery, dbRepo) + if err != nil { + return databaseg.ProcessSQLErrorf(ctx, err, "Failed to bind repo object") + } + + result, err := db.ExecContext(ctx, query, arg...) + if err != nil { + return databaseg.ProcessSQLErrorf(ctx, err, "Failed to update repository") + } + + count, err := result.RowsAffected() + if err != nil { + return databaseg.ProcessSQLErrorf(ctx, err, "Failed to get number of updated rows") + } + + if count == 0 { + return gitness_store.ErrVersionConflict + } + + return nil +} + +func (r registryDao) FetchUpstreamProxyIDs( + ctx context.Context, + repokeys []string, + parentID int64, +) (ids []int64, err error) { + var repoIDs []int64 + stmt := databaseg.Builder.Select("registry_id"). + From("registries"). + Where("registry_parent_id = ?", parentID). + Where(sq.Eq{"registry_name": repokeys}). 
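+		// sq.Eq with a slice value renders as an IN (...) clause, matching any of the repo keys.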
+ Where("registry_type = ?", artifact.RegistryTypeUPSTREAM) + + db := dbtx.GetAccessor(ctx, r.db) + + query, args, err := stmt.ToSql() + if err != nil { + return nil, errors.Wrap(err, "Failed to convert query to sql") + } + + if err = db.SelectContext(ctx, &repoIDs, query, args...); err != nil { + return nil, databaseg.ProcessSQLErrorf(ctx, err, "Failed to find registries") + } + + return repoIDs, nil +} + +func (r registryDao) mapToRegistries(ctx context.Context, dst []*registryDB) (*[]types.Registry, error) { + registries := make([]types.Registry, 0, len(dst)) + for _, d := range dst { + repo, err := r.mapToRegistry(ctx, d) + if err != nil { + return nil, err + } + registries = append(registries, *repo) + } + return ®istries, nil +} + +func (r registryDao) mapToRegistry(_ context.Context, dst *registryDB) (*types.Registry, error) { + return &types.Registry{ + ID: dst.ID, + Name: dst.Name, + ParentID: dst.ParentID, + RootParentID: dst.RootParentID, + Description: dst.Description.String, + Type: dst.Type, + PackageType: dst.PackageType, + UpstreamProxies: util.StringToInt64Arr(dst.UpstreamProxies.String), + AllowedPattern: util.StringToArr(dst.AllowedPattern.String), + BlockedPattern: util.StringToArr(dst.BlockedPattern.String), + Labels: util.StringToArr(dst.Labels.String), + CreatedAt: time.UnixMilli(dst.CreatedAt), + UpdatedAt: time.UnixMilli(dst.UpdatedAt), + CreatedBy: dst.CreatedBy, + UpdatedBy: dst.UpdatedBy, + }, nil +} + +func (r registryDao) mapToRegistryMetadataList( + ctx context.Context, + dst []*RegistryMetadataDB, +) (*[]store.RegistryMetadata, error) { + repos := make([]store.RegistryMetadata, 0, len(dst)) + for _, d := range dst { + repo := r.mapToRegistryMetadata(ctx, d) + repos = append(repos, *repo) + } + return &repos, nil +} + +func (r registryDao) mapToRegistryMetadata(_ context.Context, dst *RegistryMetadataDB) *store.RegistryMetadata { + return &store.RegistryMetadata{ + RegIdentifier: dst.RegIdentifier, + Description: dst.Description.String, + PackageType: dst.PackageType, + Type: dst.Type, + LastModified: time.UnixMilli(dst.LastModified), + URL: dst.URL.String, + ArtifactCount: dst.ArtifactCount, + DownloadCount: dst.DownloadCount, + Size: dst.Size, + Labels: util.StringToArr(dst.Labels.String), + } +} diff --git a/registry/app/store/database/registry_blobs.go b/registry/app/store/database/registry_blobs.go new file mode 100644 index 000000000..3c05ac831 --- /dev/null +++ b/registry/app/store/database/registry_blobs.go @@ -0,0 +1,145 @@ +// Copyright 2023 Harness, Inc. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+ +package database + +import ( + "context" + "database/sql" + "errors" + "fmt" + "time" + + "github.com/harness/gitness/app/api/request" + "github.com/harness/gitness/registry/app/store" + "github.com/harness/gitness/registry/types" + databaseg "github.com/harness/gitness/store/database" + "github.com/harness/gitness/store/database/dbtx" + + "github.com/jmoiron/sqlx" + "github.com/rs/zerolog/log" +) + +type registryBlobDao struct { + db *sqlx.DB +} + +func NewRegistryBlobDao(db *sqlx.DB) store.RegistryBlobRepository { + return ®istryBlobDao{ + db: db, + } +} + +// registryBlobDB holds the record of a registry_blobs in DB. +type registryBlobDB struct { + ID int64 `db:"rblob_id"` + RegistryID int64 `db:"rblob_registry_id"` + BlobID int64 `db:"rblob_blob_id"` + ImageName string `db:"rblob_image_name"` + CreatedAt int64 `db:"rblob_created_at"` + UpdatedAt int64 `db:"rblob_updated_at"` + CreatedBy int64 `db:"rblob_created_by"` + UpdatedBy int64 `db:"rblob_updated_by"` +} + +func (r registryBlobDao) LinkBlob( + ctx context.Context, imageName string, + registry *types.Registry, blobID int64, +) error { + sqlQuery := ` + INSERT INTO registry_blobs ( + rblob_blob_id, + rblob_registry_id, + rblob_image_name, + rblob_created_at, + rblob_updated_at, + rblob_created_by, + rblob_updated_by + ) VALUES ( + :rblob_blob_id, + :rblob_registry_id, + :rblob_image_name, + :rblob_created_at, + :rblob_updated_at, + :rblob_created_by, + :rblob_updated_by + ) ON CONFLICT ( + rblob_registry_id, rblob_blob_id, rblob_image_name + ) DO NOTHING + RETURNING rblob_registry_id` + + rblob := mapToInternalRegistryBlob(ctx, registry.ID, blobID, imageName) + db := dbtx.GetAccessor(ctx, r.db) + query, arg, err := db.BindNamed(sqlQuery, rblob) + if err != nil { + return databaseg.ProcessSQLErrorf(ctx, err, "Failed to bind repo object") + } + + var registryBlobID int64 + + if err = db.QueryRowContext(ctx, query, arg...).Scan(®istryBlobID); err != nil { + if errors.Is(err, sql.ErrNoRows) { + return nil + } + + return databaseg.ProcessSQLErrorf(ctx, err, "Insert query failed") + } + + log.Ctx(ctx).Info().Msgf("Linking blob to registry %d with id: %d", registry.ID, registryBlobID) + + return nil +} + +// UnlinkBlob unlinks a blob from a repository. It does nothing if not linked. A boolean is returned to denote whether +// the link was deleted or not. This avoids the need for a separate preceding `SELECT` to find if it exists. +func (r registryBlobDao) UnlinkBlob( + ctx context.Context, imageName string, + registry *types.Registry, blobID int64, +) (bool, error) { + stmt := databaseg.Builder.Delete("registry_blobs"). + Where("rblob_registry_id = ? AND rblob_blob_id = ? "+ + "AND rblob_image_name = ?", registry.ID, blobID, imageName) + + sql, args, err := stmt.ToSql() + if err != nil { + return false, fmt.Errorf("failed to convert purge registry query to sql: %w", err) + } + + db := dbtx.GetAccessor(ctx, r.db) + + result, err := db.ExecContext(ctx, sql, args...) 
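+	// RowsAffected (inspected below) doubles as the "was it actually linked?" answer,
+	// which is what lets callers skip a preceding existence SELECT.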
+ if err != nil { + return false, databaseg.ProcessSQLErrorf(ctx, err, "error unlinking blobs") + } + + affected, err := result.RowsAffected() + return affected == 1, err +} + +func mapToInternalRegistryBlob( + ctx context.Context, registryID int64, blobID int64, + imageName string, +) *registryBlobDB { + creationTime := time.Now().UnixMilli() + session, _ := request.AuthSessionFrom(ctx) + return ®istryBlobDB{ + RegistryID: registryID, + BlobID: blobID, + ImageName: imageName, + CreatedAt: creationTime, + UpdatedAt: creationTime, + CreatedBy: session.Principal.ID, + UpdatedBy: session.Principal.ID, + } +} diff --git a/registry/app/store/database/tag.go b/registry/app/store/database/tag.go new file mode 100644 index 000000000..928a10708 --- /dev/null +++ b/registry/app/store/database/tag.go @@ -0,0 +1,937 @@ +// Copyright 2023 Harness, Inc. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package database + +import ( + "context" + "database/sql" + "fmt" + "sort" + "strings" + "time" + + "github.com/harness/gitness/app/api/request" + "github.com/harness/gitness/registry/app/api/openapi/contracts/artifact" + "github.com/harness/gitness/registry/app/store" + "github.com/harness/gitness/registry/app/store/database/util" + "github.com/harness/gitness/registry/types" + store2 "github.com/harness/gitness/store" + databaseg "github.com/harness/gitness/store/database" + "github.com/harness/gitness/store/database/dbtx" + + sq "github.com/Masterminds/squirrel" + "github.com/jmoiron/sqlx" + "github.com/pkg/errors" +) + +const ( + // OrderDesc is the normalized string to be used for sorting results in descending order. + OrderDesc types.SortOrder = "desc" + lessThan string = "<" + greaterThan string = ">" + labelSeparatorStart string = "%^_" + labelSeparatorEnd string = "^_%" +) + +type tagDao struct { + db *sqlx.DB +} + +func NewTagDao(db *sqlx.DB) store.TagRepository { + return &tagDao{ + db: db, + } +} + +// tagDB holds the record of a tag in DB. 
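+// Timestamps are stored as Unix-millisecond integers. tag_created_by and
+// tag_updated_by are nullable; mapToTag maps a missing principal to -1.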
+type tagDB struct { + ID int64 `db:"tag_id"` + Name string `db:"tag_name"` + ImageName string `db:"tag_image_name"` + RegistryID int64 `db:"tag_registry_id"` + ManifestID int64 `db:"tag_manifest_id"` + CreatedAt int64 `db:"tag_created_at"` + UpdatedAt int64 `db:"tag_updated_at"` + CreatedBy sql.NullInt64 `db:"tag_created_by"` + UpdatedBy sql.NullInt64 `db:"tag_updated_by"` +} + +type artifactMetadataDB struct { + Name string `db:"name"` + RepoName string `db:"repo_name"` + DownloadCount int64 `db:"download_count"` + PackageType artifact.PackageType `db:"package_type"` + Labels sql.NullString `db:"labels"` + LatestVersion string `db:"latest_version"` + CreatedAt int64 `db:"created_at"` + ModifiedAt int64 `db:"modified_at"` +} + +type tagMetadataDB struct { + Name string `db:"name"` + Size string `db:"size"` + PackageType artifact.PackageType `db:"package_type"` + DigestCount int `db:"digest_count"` + IsLatestVersion bool `db:"latest_version"` + ModifiedAt int64 `db:"modified_at"` +} + +type tagDetailDB struct { + ID int64 `db:"id"` + Name string `db:"name"` + ImageName string `db:"image_name"` + CreatedAt int64 `db:"created_at"` + UpdatedAt int64 `db:"updated_at"` + Size string `db:"size"` +} + +func (t tagDao) CreateOrUpdate(ctx context.Context, tag *types.Tag) error { + const sqlQuery = ` + INSERT INTO tags ( + tag_name + ,tag_image_name + ,tag_registry_id + ,tag_manifest_id + ,tag_created_at + ,tag_updated_at + ,tag_created_by + ,tag_updated_by + ) VALUES ( + :tag_name + ,:tag_image_name + ,:tag_registry_id + ,:tag_manifest_id + ,:tag_created_at + ,:tag_updated_at + ,:tag_created_by + ,:tag_updated_by + ) + ON CONFLICT (tag_registry_id, tag_name, tag_image_name) + DO UPDATE SET + tag_manifest_id = :tag_manifest_id, + tag_updated_at = :tag_updated_at + WHERE + tags.tag_manifest_id <> :tag_manifest_id + RETURNING + tag_id, tag_created_at, tag_updated_at` + + db := dbtx.GetAccessor(ctx, t.db) + tagDB := t.mapToInternalTag(ctx, tag) + query, arg, err := db.BindNamed(sqlQuery, tagDB) + if err != nil { + return databaseg.ProcessSQLErrorf(ctx, err, "Failed to bind repo object") + } + + if err = db.QueryRowContext(ctx, query, arg...).Scan( + &tagDB.ID, + &tagDB.CreatedAt, &tagDB.UpdatedAt, + ); err != nil { + err := databaseg.ProcessSQLErrorf(ctx, err, "Insert query failed") + if !errors.Is(err, store2.ErrResourceNotFound) { + return err + } + } + return nil +} + +// LockTagByNameForUpdate locks a tag by name within a repository using SELECT FOR UPDATE. +// It returns a boolean indicating whether the tag exists and was successfully locked. +func (t tagDao) LockTagByNameForUpdate( + ctx context.Context, repoID int64, + name string, +) (bool, error) { + // Since tag_registry_id is not unique in the DB schema, we use LIMIT 1 to ensure that + // only one record is locked and processed. + stmt := databaseg.Builder.Select("1"). + From("tags"). + Where("tag_registry_id = ? AND tag_name = ?", repoID, name). + Limit(1). + Suffix("FOR UPDATE") + + sqlQuery, args, err := stmt.ToSql() + if err != nil { + return false, fmt.Errorf("failed to convert select for update query to SQL: %w", err) + } + + db := dbtx.GetAccessor(ctx, t.db) + + var exists int + err = db.QueryRowContext(ctx, sqlQuery, args...).Scan(&exists) + if err != nil { + if errors.Is(err, sql.ErrNoRows) { + return false, nil // Tag does not exist + } + return false, databaseg.ProcessSQLErrorf(ctx, err, "the select for update query failed") + } + return true, nil +} + +// DeleteTagByName deletes a tag by name within a repository. 
 A boolean is returned to denote whether the tag was
+// deleted or not. This avoids the need for a separate preceding `SELECT` to find if it exists.
+func (t tagDao) DeleteTagByName(
+	ctx context.Context, repoID int64,
+	name string,
+) (bool, error) {
+	stmt := databaseg.Builder.Delete("tags").
+		Where("tag_registry_id = ? AND tag_name = ?", repoID, name)
+
+	sql, args, err := stmt.ToSql()
+	if err != nil {
+		return false, fmt.Errorf("failed to convert purge tag query to sql: %w", err)
+	}
+
+	db := dbtx.GetAccessor(ctx, t.db)
+
+	result, err := db.ExecContext(ctx, sql, args...)
+	if err != nil {
+		return false, databaseg.ProcessSQLErrorf(ctx, err, "the delete query failed")
+	}
+
+	count, err := result.RowsAffected()
+	if err != nil {
+		return false, databaseg.ProcessSQLErrorf(ctx, err, "failed to get number of deleted rows")
+	}
+	return count == 1, nil
+}
+
+// DeleteTagByManifestID deletes all tags pointing at a manifest within a repository.
+// A boolean is returned to denote whether at least one tag was deleted. This avoids
+// the need for a separate preceding `SELECT` to find if any exist.
+func (t tagDao) DeleteTagByManifestID(
+	ctx context.Context,
+	repoID int64,
+	manifestID int64,
+) (bool, error) {
+	stmt := databaseg.Builder.Delete("tags").
+		Where("tag_registry_id = ? AND tag_manifest_id = ?", repoID, manifestID)
+
+	sql, args, err := stmt.ToSql()
+	if err != nil {
+		return false, fmt.Errorf("failed to convert purge tag query to sql: %w", err)
+	}
+
+	db := dbtx.GetAccessor(ctx, t.db)
+
+	result, err := db.ExecContext(ctx, sql, args...)
+	if err != nil {
+		return false, databaseg.ProcessSQLErrorf(ctx, err, "the delete query failed")
+	}
+
+	count, err := result.RowsAffected()
+	if err != nil {
+		return false, databaseg.ProcessSQLErrorf(ctx, err, "failed to get number of deleted rows")
+	}
+	return count > 0, nil
+}
+
+// TagsPaginated finds up to `filters.MaxEntries` tags of a given
+// repository with name lexicographically after `filters.LastEntry`.
+// This is used exclusively for the GET /v2/<name>/tags/list API route,
+// where pagination is done with a marker (`filters.LastEntry`).
+// Even if there is no tag with a name of `filters.LastEntry`,
+// the returned tags will always be those with a path lexicographically after
+// `filters.LastEntry`. Finally, tags are lexicographically sorted.
+// These constraints exist to preserve the existing API behaviour
+// (when doing a filesystem walk based pagination).
+func (t tagDao) TagsPaginated(
+	ctx context.Context, repoID int64,
+	image string, filters types.FilterParams,
+) ([]*types.Tag, error) {
+	stmt := databaseg.Builder.
+		Select(util.ArrToStringByDelimiter(util.GetDBTagsFromStruct(tagDB{}), ",")).
+		From("tags").
+		Where(
+			"tag_registry_id = ? AND tag_image_name = ? AND tag_name > ?",
+			repoID, image, filters.LastEntry,
+		).
+		OrderBy("tag_name").Limit(uint64(filters.MaxEntries))
+
+	db := dbtx.GetAccessor(ctx, t.db)
+
+	dst := []*tagDB{}
+	sql, args, err := stmt.ToSql()
+	if err != nil {
+		return nil, errors.Wrap(err, "Failed to convert query to sql")
+	}
+
+	if err = db.SelectContext(ctx, &dst, sql, args...); err != nil {
+		return nil, databaseg.ProcessSQLErrorf(ctx, err, "Failed to find tag")
+	}
+	return t.mapToTagList(ctx, dst)
+}
+
+func (t tagDao) HasTagsAfterName(
+	ctx context.Context, repoID int64,
+	filters types.FilterParams,
+) (bool, error) {
+	stmt := databaseg.Builder.
+		Select("COUNT(*)").
+		From("tags").
+		Where(
+			"tag_registry_id = ? AND tag_name LIKE ?",
", + repoID, sqlPartialMatch(filters.Name), + ) + comparison := greaterThan + if filters.SortOrder == OrderDesc { + comparison = lessThan + } + + if filters.OrderBy != "published_at" { + stmt = stmt.Where("tag_name "+comparison+" ?", filters.LastEntry) + } else { + stmt = stmt.Where( + "AND (GREATEST(tag_created_at, tag_updated_at), tag_name) "+comparison+" (? ?)", + filters.PublishedAt, filters.LastEntry, + ) + } + stmt = stmt.OrderBy("tag_name").GroupBy("tag_name").Limit(uint64(filters.MaxEntries)) + + db := dbtx.GetAccessor(ctx, t.db) + + var count int64 + sqlQuery, args, err := stmt.ToSql() + if err != nil { + return false, errors.Wrap(err, "Failed to convert query to sqlQuery") + } + + if err = db.QueryRowContext(ctx, sqlQuery, args...).Scan(&count); err != nil && + !errors.Is(err, sql.ErrNoRows) { + return false, + databaseg.ProcessSQLErrorf(ctx, err, "Failed to find tag") + } + return count == 1, nil +} + +// sqlPartialMatch builds a string that can be passed as value +// +// for a SQL `LIKE` expression. Besides surrounding the +// +// input value with `%` wildcard characters for a partial match, +// +// this function also escapes the `_` and `%` +// +// metacharacters supported in Postgres `LIKE` expressions. +// See https://www.postgresql.org/docs/current/ +// functions-matching.html#FUNCTIONS-LIKE for more details. +func sqlPartialMatch(value string) string { + value = strings.ReplaceAll(value, "_", `\_`) + value = strings.ReplaceAll(value, "%", `\%`) + + return fmt.Sprintf("%%%s%%", value) +} + +func (t tagDao) GetAllArtifactsByParentID( + ctx context.Context, + parentID int64, + packageTypes *[]string, + sortByField string, + sortByOrder string, + limit int, + offset int, + search string, + labels []string, +) (*[]types.ArtifactMetadata, error) { + q := databaseg.Builder.Select( + "r.registry_name as repo_name, t.tag_image_name as name,"+ + " r.registry_package_type as package_type, t.tag_name as latest_version,"+ + " t.tag_updated_at as modified_at, ar.artifact_labels as labels, t2.download_count as download_count ", + ). + From("tags t"). + Join( + "(SELECT t.tag_id as id, ROW_NUMBER() OVER "+ + " (PARTITION BY t.tag_registry_id, t.tag_image_name ORDER BY t.tag_updated_at DESC) AS rank "+ + " FROM tags t JOIN registries r ON t.tag_registry_id = r.registry_id "+ + " WHERE r.registry_parent_id = ? ) AS a ON t.tag_id = a.id", parentID, + ). + Join("registries r ON t.tag_registry_id = r.registry_id"). + Join( + "artifacts ar ON ar.artifact_registry_id = t.tag_registry_id AND"+ + " ar.artifact_name = t.tag_image_name", + ). + LeftJoin( + "(SELECT b.artifact_name as artifact_name, COALESCE(sum(a.artifact_stat_download_count),0) as"+ + " download_count FROM artifact_stats a LEFT JOIN artifacts b"+ + " ON a.artifact_stat_artifact_id = b.artifact_id LEFT JOIN registries c"+ + " ON b.artifact_registry_id = c.registry_id"+ + " WHERE c.registry_parent_id = ? AND b.artifact_enabled = true GROUP BY b.artifact_name) as t2"+ + " ON t.tag_image_name = t2.artifact_name", parentID, + ). 
+ Where("a.rank = 1") + + if len(*packageTypes) > 0 { + q = q.Where(sq.Eq{"r.registry_package_type": packageTypes}) + } + + if len(labels) > 0 { + sort.Strings(labels) + labelsVal := getEmptySQLString(util.ArrToString(labels)) + + labelsVal.String = labelSeparatorStart + labelsVal.String + labelSeparatorEnd + q = q.Where("'^_' || ar.artifact_labels || '^_' LIKE ?", labelsVal) + } + + if search != "" { + q = q.Where("tag_image_name LIKE ?", sqlPartialMatch(search)) + } + + q = q.OrderBy("tag_" + sortByField + " " + sortByOrder).Limit(uint64(limit)).Offset(uint64(offset)) + + sql, args, err := q.ToSql() + if err != nil { + return nil, errors.Wrap(err, "Failed to convert query to sql") + } + + db := dbtx.GetAccessor(ctx, t.db) + + dst := []*artifactMetadataDB{} + if err = db.SelectContext(ctx, &dst, sql, args...); err != nil { + return nil, databaseg.ProcessSQLErrorf(ctx, err, "Failed executing custom list query") + } + return t.mapToArtifactMetadataList(ctx, dst) +} + +func (t tagDao) CountAllArtifactsByParentID( + ctx context.Context, parentID int64, + packageTypes *[]string, search string, labels []string, +) (int64, error) { + // nolint:goconst + q := databaseg.Builder.Select("COUNT(*)"). + From("tags t"). + Join( + "(SELECT t.tag_id as id, ROW_NUMBER() OVER "+ + " (PARTITION BY t.tag_registry_id, t.tag_image_name ORDER BY t.tag_updated_at DESC) AS rank FROM tags t "+ + " JOIN registries r ON t.tag_registry_id = r.registry_id "+ + " WHERE r.registry_parent_id = ?) AS a ON t.tag_id = a.id", parentID, + ). + Join("registries r ON t.tag_registry_id = r.registry_id"). + Join( + "artifacts ar ON ar.artifact_registry_id = t.tag_registry_id" + + " AND ar.artifact_name = t.tag_image_name", + ). + Where("a.rank = 1 ") + + if len(*packageTypes) > 0 { + q = q.Where(sq.Eq{"r.registry_package_type": packageTypes}) + } + + if search != "" { + q = q.Where("tag_image_name LIKE ?", sqlPartialMatch(search)) + } + + if len(labels) > 0 { + sort.Strings(labels) + labelsVal := getEmptySQLString(util.ArrToString(labels)) + labelsVal.String = labelSeparatorStart + labelsVal.String + labelSeparatorEnd + q = q.Where("'^_' || ar.artifact_labels || '^_' LIKE ?", labelsVal) + } + + sql, args, err := q.ToSql() + if err != nil { + return -1, errors.Wrap(err, "Failed to convert query to sql") + } + db := dbtx.GetAccessor(ctx, t.db) + + var count int64 + err = db.QueryRowContext(ctx, sql, args...).Scan(&count) + if err != nil { + return 0, databaseg.ProcessSQLErrorf(ctx, err, "Failed executing count query") + } + return count, nil +} + +func (t tagDao) GetTagDetail( + ctx context.Context, repoID int64, imageName string, + name string, +) (*types.TagDetail, error) { + q := databaseg.Builder.Select( + "tag_id as id, tag_name as name ,"+ + " tag_image_name as image_name, tag_created_at as created_at, "+ + " tag_updated_at as updated_at, manifest_total_size as size", + ). + From("tags"). + Join("manifests ON manifest_id = tag_manifest_id"). + Where( + "tag_registry_id = ? AND tag_image_name = ? 
AND tag_name = ?", + repoID, imageName, name, + ) + + sql, args, err := q.ToSql() + if err != nil { + return nil, errors.Wrap(err, "Failed to convert query to sql") + } + + db := dbtx.GetAccessor(ctx, t.db) + + dst := new(tagDetailDB) + if err = db.GetContext(ctx, dst, sql, args...); err != nil { + return nil, databaseg.ProcessSQLErrorf(ctx, err, "Failed to get tag detail") + } + + return t.mapToTagDetail(ctx, dst) +} + +func (t tagDao) GetLatestTagMetadata( + ctx context.Context, + parentID int64, + repoKey string, + imageName string, +) (*types.ArtifactMetadata, error) { + q := databaseg.Builder.Select( + "r.registry_name as repo_name,"+ + " r.registry_package_type as package_type, t.tag_image_name as name, "+ + "t.tag_name as latest_version, t.tag_created_at as created_at,"+ + " t.tag_updated_at as modified_at, ar.artifact_labels as labels", + ). + From("tags t"). + Join("registries r ON t.tag_registry_id = r.registry_id"). + Join( + "artifacts ar ON ar.artifact_registry_id = t.tag_registry_id "+ + "AND ar.artifact_name = t.tag_image_name", + ). + Where( + "r.registry_parent_id = ? AND r.registry_name = ?"+ + " AND t.tag_image_name = ?", parentID, repoKey, imageName, + ). + OrderBy("t.tag_updated_at DESC").Limit(1) + + sql, args, err := q.ToSql() + if err != nil { + return nil, errors.Wrap(err, "Failed to convert query to sql") + } + + db := dbtx.GetAccessor(ctx, t.db) + + dst := new(artifactMetadataDB) + if err = db.GetContext(ctx, dst, sql, args...); err != nil { + return nil, databaseg.ProcessSQLErrorf(ctx, err, "Failed to get tag detail") + } + + return t.mapToArtifactMetadata(ctx, dst) +} + +func (t tagDao) GetLatestTagName( + ctx context.Context, + parentID int64, + repoKey string, + imageName string, +) (string, error) { + q := databaseg.Builder.Select("tag_name as name"). + From("tags"). + Join("registries ON tag_registry_id = registry_id"). + Where( + "registry_parent_id = ? AND registry_name = ? AND tag_image_name = ?", + parentID, repoKey, imageName, + ). + OrderBy("tag_updated_at DESC").Limit(1) + + sql, args, err := q.ToSql() + if err != nil { + return "", errors.Wrap(err, "Failed to convert query to sql") + } + + db := dbtx.GetAccessor(ctx, t.db) + + var tag string + err = db.QueryRowContext(ctx, sql, args...).Scan(&tag) + if err != nil { + return tag, databaseg.ProcessSQLErrorf(ctx, err, "Failed executing get tag name query") + } + return tag, nil +} + +func (t tagDao) GetTagMetadata( + ctx context.Context, + parentID int64, + repoKey string, + imageName string, + name string, +) (*types.TagMetadata, error) { + q := databaseg.Builder.Select( + "registry_package_type as package_type, tag_name as name,"+ + "tag_updated_at as modified_at, manifest_total_size as size", + ). + From("tags"). + Join("registries ON tag_registry_id = registry_id"). + Join("manifests ON manifest_id = tag_manifest_id"). + Where( + "registry_parent_id = ? AND registry_name = ?"+ + " AND tag_image_name = ? AND tag_name = ?", parentID, repoKey, imageName, name, + ) + + sql, args, err := q.ToSql() + if err != nil { + return nil, errors.Wrap(err, "Failed to convert query to sql") + } + + db := dbtx.GetAccessor(ctx, t.db) + + dst := new(tagMetadataDB) + if err = db.GetContext(ctx, dst, sql, args...); err != nil { + return nil, databaseg.ProcessSQLErrorf(ctx, err, "Failed to get tag metadata") + } + + return t.mapToTagMetadata(ctx, dst) +} + +func (t tagDao) GetLatestTag(ctx context.Context, repoID int64, imageName string) (*types.Tag, error) { + stmt := databaseg.Builder. 
+ Select(util.ArrToStringByDelimiter(util.GetDBTagsFromStruct(tagDB{}), ",")). + From("tags"). + Where("tag_registry_id = ? AND tag_image_name = ?", repoID, imageName). + OrderBy("tag_updated_at DESC").Limit(1) + + db := dbtx.GetAccessor(ctx, t.db) + + dst := new(tagDB) + sql, args, err := stmt.ToSql() + if err != nil { + return nil, errors.Wrap(err, "Failed to convert query to sql") + } + + if err = db.GetContext(ctx, dst, sql, args...); err != nil { + return nil, databaseg.ProcessSQLErrorf(ctx, err, "Failed to find tag") + } + + return t.mapToTag(ctx, dst) +} + +func (t tagDao) GetAllArtifactsByRepo( + ctx context.Context, parentID int64, repoKey string, + sortByField string, sortByOrder string, limit int, offset int, search string, + labels []string, +) (*[]types.ArtifactMetadata, error) { + q := databaseg.Builder.Select( + "r.registry_name as repo_name, t.tag_image_name as name,"+ + " r.registry_package_type as package_type, t.tag_name as latest_version,"+ + " t.tag_updated_at as modified_at, ar.artifact_labels as labels, t2.download_count ", + ). + From("tags t"). + Join( + "(SELECT t.tag_id as id, ROW_NUMBER() OVER (PARTITION BY t.tag_registry_id, t.tag_image_name"+ + " ORDER BY t.tag_updated_at DESC) AS rank FROM tags t "+ + " JOIN registries r ON t.tag_registry_id = r.registry_id "+ + " WHERE r.registry_parent_id = ? AND r.registry_name = ? ) AS a"+ + " ON t.tag_id = a.id", parentID, repoKey, + ). + Join("registries r ON t.tag_registry_id = r.registry_id"). + Join( + "artifacts ar ON ar.artifact_registry_id = t.tag_registry_id"+ + " AND ar.artifact_name = t.tag_image_name", + ). + LeftJoin( + "(SELECT b.artifact_name as artifact_name, COALESCE(sum(a.artifact_stat_download_count),0) as"+ + " download_count FROM artifact_stats a LEFT JOIN artifacts b"+ + " ON a.artifact_stat_artifact_id = b.artifact_id LEFT JOIN registries c"+ + " ON b.artifact_registry_id = c.registry_id"+ + " WHERE c.registry_parent_id = ? AND c.registry_name = ? AND b.artifact_enabled = true"+ + " GROUP BY b.artifact_name) as t2"+ + " ON t.tag_image_name = t2.artifact_name", parentID, repoKey, + ). + Where("a.rank = 1 ") + + if search != "" { + q = q.Where("tag_image_name LIKE ?", sqlPartialMatch(search)) + } + + if len(labels) > 0 { + sort.Strings(labels) + labelsVal := getEmptySQLString(util.ArrToString(labels)) + labelsVal.String = labelSeparatorStart + labelsVal.String + labelSeparatorEnd + q = q.Where("'^_' || ar.artifact_labels || '^_' LIKE ?", labelsVal) + } + + q = q.OrderBy("t.tag_" + sortByField + " " + sortByOrder).Limit(uint64(limit)).Offset(uint64(offset)) + + sql, args, err := q.ToSql() + if err != nil { + return nil, errors.Wrap(err, "Failed to convert query to sql") + } + + db := dbtx.GetAccessor(ctx, t.db) + + dst := []*artifactMetadataDB{} + if err = db.SelectContext(ctx, &dst, sql, args...); err != nil { + return nil, databaseg.ProcessSQLErrorf(ctx, err, "Failed executing custom list query") + } + return t.mapToArtifactMetadataList(ctx, dst) +} + +func (t tagDao) CountAllArtifactsByRepo( + ctx context.Context, parentID int64, repoKey string, + search string, labels []string, +) (int64, error) { + q := databaseg.Builder.Select("COUNT(*)"). + From("tags t"). + Join( + "(SELECT t.tag_id as id, ROW_NUMBER() OVER (PARTITION BY t.tag_registry_id, t.tag_image_name"+ + " ORDER BY t.tag_updated_at DESC) AS rank FROM tags t "+ + " JOIN registries r ON t.tag_registry_id = r.registry_id "+ + " WHERE r.registry_parent_id = ? AND r.registry_name = ? ) AS a ON t.tag_id = a.id", parentID, repoKey, + ). 
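+		// Mirrors the joins in GetAllArtifactsByRepo so the count matches the
+		// paginated listing: one ranked row per (registry, image) pair.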
+ Join("registries r ON t.tag_registry_id = r.registry_id"). + Join( + "artifacts ar ON ar.artifact_registry_id = t.tag_registry_id AND" + + " ar.artifact_name = t.tag_image_name", + ). + Where("a.rank = 1 ") + + if search != "" { + q = q.Where("tag_image_name LIKE ?", sqlPartialMatch(search)) + } + + if len(labels) > 0 { + sort.Strings(labels) + labelsVal := getEmptySQLString(util.ArrToString(labels)) + labelsVal.String = labelSeparatorStart + labelsVal.String + labelSeparatorEnd + q = q.Where("'^_' || ar.artifact_labels || '^_' LIKE ?", labelsVal) + } + + sql, args, err := q.ToSql() + if err != nil { + return -1, errors.Wrap(err, "Failed to convert query to sql") + } + db := dbtx.GetAccessor(ctx, t.db) + + var count int64 + err = db.QueryRowContext(ctx, sql, args...).Scan(&count) + if err != nil { + return 0, databaseg.ProcessSQLErrorf(ctx, err, "Failed executing count query") + } + return count, nil +} + +func (t tagDao) GetAllTagsByRepoAndImage( + ctx context.Context, parentID int64, repoKey string, + image string, sortByField string, sortByOrder string, limit int, offset int, + search string, +) (*[]types.TagMetadata, error) { + q := databaseg.Builder.Select( + "t.tag_name as name, m.manifest_total_size as size,"+ + " r.registry_package_type as package_type, t.tag_updated_at as modified_at", + ). + From("tags t"). + Join("registries r ON t.tag_registry_id = r.registry_id"). + Join("manifests m ON t.tag_manifest_id = m.manifest_id"). + Where( + "r.registry_parent_id = ? AND r.registry_name = ? AND t.tag_image_name = ?", + parentID, repoKey, image, + ) + + if search != "" { + q = q.Where("tag_name LIKE ?", sqlPartialMatch(search)) + } + + q = q.OrderBy("tag_" + sortByField + " " + sortByOrder).Limit(uint64(limit)).Offset(uint64(offset)) + + sql, args, err := q.ToSql() + if err != nil { + return nil, errors.Wrap(err, "Failed to convert query to sql") + } + + db := dbtx.GetAccessor(ctx, t.db) + + dst := []*tagMetadataDB{} + if err = db.SelectContext(ctx, &dst, sql, args...); err != nil { + return nil, databaseg.ProcessSQLErrorf(ctx, err, "Failed executing custom list query") + } + return t.mapToTagMetadataList(ctx, dst) +} + +func (t tagDao) CountAllTagsByRepoAndImage( + ctx context.Context, parentID int64, + repoKey string, image string, search string, +) (int64, error) { + stmt := databaseg.Builder.Select("COUNT(*)"). + From("tags"). + Join("registries ON tag_registry_id = registry_id"). + Join("manifests ON tag_manifest_id = manifest_id"). + Where( + "registry_parent_id = ? AND registry_name = ?"+ + "AND tag_image_name = ?", parentID, repoKey, image, + ) + + if search != "" { + stmt = stmt.Where("tag_name LIKE ?", sqlPartialMatch(search)) + } + + sql, args, err := stmt.ToSql() + if err != nil { + return -1, errors.Wrap(err, "Failed to convert query to sql") + } + + db := dbtx.GetAccessor(ctx, t.db) + + var count int64 + err = db.QueryRowContext(ctx, sql, args...).Scan(&count) + if err != nil { + return 0, databaseg.ProcessSQLErrorf(ctx, err, "Failed executing count query") + } + return count, nil +} + +func (t tagDao) FindTag( + ctx context.Context, repoID int64, imageName string, + name string, +) (*types.Tag, error) { + stmt := databaseg.Builder. + Select(util.ArrToStringByDelimiter(util.GetDBTagsFromStruct(tagDB{}), ",")). + From("tags"). + Where("tag_registry_id = ? AND tag_image_name = ? 
AND tag_name = ?", repoID, imageName, name) + + db := dbtx.GetAccessor(ctx, t.db) + + dst := new(tagDB) + sql, args, err := stmt.ToSql() + if err != nil { + return nil, errors.Wrap(err, "Failed to convert query to sql") + } + + if err = db.GetContext(ctx, dst, sql, args...); err != nil { + return nil, databaseg.ProcessSQLErrorf(ctx, err, "Failed to find tag") + } + + //TODO: validate for empty row + return t.mapToTag(ctx, dst) +} + +func (t tagDao) mapToInternalTag(ctx context.Context, in *types.Tag) *tagDB { + if in.CreatedAt.IsZero() { + in.CreatedAt = time.Now() + } + in.UpdatedAt = time.Now() + session, _ := request.AuthSessionFrom(ctx) + if in.CreatedBy == 0 { + in.CreatedBy = session.Principal.ID + } + in.UpdatedBy = session.Principal.ID + + return &tagDB{ + ID: in.ID, + Name: in.Name, + ImageName: in.ImageName, + RegistryID: in.RegistryID, + ManifestID: in.ManifestID, + CreatedAt: in.CreatedAt.UnixMilli(), + UpdatedAt: in.UpdatedAt.UnixMilli(), + CreatedBy: sql.NullInt64{Int64: in.CreatedBy, Valid: true}, + UpdatedBy: sql.NullInt64{Int64: in.UpdatedBy, Valid: true}, + } +} + +func (t tagDao) mapToTag(_ context.Context, dst *tagDB) (*types.Tag, error) { + createdBy := int64(-1) + updatedBy := int64(-1) + if dst.CreatedBy.Valid { + createdBy = dst.CreatedBy.Int64 + } + if dst.UpdatedBy.Valid { + updatedBy = dst.UpdatedBy.Int64 + } + return &types.Tag{ + ID: dst.ID, + Name: dst.Name, + ImageName: dst.ImageName, + RegistryID: dst.RegistryID, + ManifestID: dst.ManifestID, + CreatedAt: time.UnixMilli(dst.CreatedAt), + UpdatedAt: time.UnixMilli(dst.UpdatedAt), + CreatedBy: createdBy, + UpdatedBy: updatedBy, + }, nil +} + +func (t tagDao) mapToTagList(ctx context.Context, dst []*tagDB) ([]*types.Tag, error) { + tags := make([]*types.Tag, 0, len(dst)) + for _, d := range dst { + tag, err := t.mapToTag(ctx, d) + if err != nil { + return nil, err + } + tags = append(tags, tag) + } + return tags, nil +} + +func (t tagDao) mapToArtifactMetadataList( + ctx context.Context, + dst []*artifactMetadataDB, +) (*[]types.ArtifactMetadata, error) { + artifacts := make([]types.ArtifactMetadata, 0, len(dst)) + for _, d := range dst { + artifact, err := t.mapToArtifactMetadata(ctx, d) + if err != nil { + return nil, err + } + artifacts = append(artifacts, *artifact) + } + return &artifacts, nil +} + +func (t tagDao) mapToArtifactMetadata( + _ context.Context, + dst *artifactMetadataDB, +) (*types.ArtifactMetadata, error) { + return &types.ArtifactMetadata{ + Name: dst.Name, + RepoName: dst.RepoName, + DownloadCount: dst.DownloadCount, + PackageType: dst.PackageType, + LatestVersion: dst.LatestVersion, + Labels: util.StringToArr(dst.Labels.String), + CreatedAt: time.UnixMilli(dst.CreatedAt), + ModifiedAt: time.UnixMilli(dst.ModifiedAt), + }, nil +} + +func (t tagDao) mapToTagMetadataList( + ctx context.Context, + dst []*tagMetadataDB, +) (*[]types.TagMetadata, error) { + tags := make([]types.TagMetadata, 0, len(dst)) + for _, d := range dst { + tag, err := t.mapToTagMetadata(ctx, d) + if err != nil { + return nil, err + } + tags = append(tags, *tag) + } + return &tags, nil +} + +func (t tagDao) mapToTagMetadata( + _ context.Context, + dst *tagMetadataDB, +) (*types.TagMetadata, error) { + return &types.TagMetadata{ + Name: dst.Name, + Size: dst.Size, + PackageType: dst.PackageType, + DigestCount: dst.DigestCount, + IsLatestVersion: dst.IsLatestVersion, + ModifiedAt: time.UnixMilli(dst.ModifiedAt), + }, nil +} + +func (t tagDao) mapToTagDetail( + _ context.Context, + dst *tagDetailDB, +) (*types.TagDetail, 
error) { + return &types.TagDetail{ + ID: dst.ID, + Name: dst.Name, + ImageName: dst.ImageName, + Size: dst.Size, + CreatedAt: time.UnixMilli(dst.CreatedAt), + UpdatedAt: time.UnixMilli(dst.UpdatedAt), + }, nil +} diff --git a/registry/app/store/database/upstream_proxy.go b/registry/app/store/database/upstream_proxy.go new file mode 100644 index 000000000..1480446fa --- /dev/null +++ b/registry/app/store/database/upstream_proxy.go @@ -0,0 +1,420 @@ +// Copyright 2023 Harness, Inc. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package database + +import ( + "context" + "database/sql" + "fmt" + "time" + + "github.com/harness/gitness/app/api/request" + "github.com/harness/gitness/registry/app/api/openapi/contracts/artifact" + "github.com/harness/gitness/registry/app/store" + "github.com/harness/gitness/registry/app/store/database/util" + "github.com/harness/gitness/registry/types" + gitness_store "github.com/harness/gitness/store" + databaseg "github.com/harness/gitness/store/database" + "github.com/harness/gitness/store/database/dbtx" + + "github.com/Masterminds/squirrel" + "github.com/jmoiron/sqlx" + "github.com/pkg/errors" +) + +type UpstreamproxyDao struct { + registryDao store.RegistryRepository + db *sqlx.DB +} + +func NewUpstreamproxyDao(db *sqlx.DB, registryDao store.RegistryRepository) store.UpstreamProxyConfigRepository { + return &UpstreamproxyDao{ + registryDao: registryDao, + db: db, + } +} + +// upstreamProxyConfigDB holds the record of an upstream_proxy_config in DB. 
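+// Timestamps are Unix-millisecond integers, consistent with the other registry tables.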
+type upstreamProxyConfigDB struct { + ID int64 `db:"upstream_proxy_config_id"` + RegistryID int64 `db:"upstream_proxy_config_registry_id"` + Source string `db:"upstream_proxy_config_source"` + URL string `db:"upstream_proxy_config_url"` + AuthType string `db:"upstream_proxy_config_auth_type"` + UserName string `db:"upstream_proxy_config_user_name"` + SecretIdentifier string `db:"upstream_proxy_config_secret_identifier"` + SecretSpaceID int `db:"upstream_proxy_config_secret_space_id"` + Token string `db:"upstream_proxy_config_token"` + CreatedAt int64 `db:"upstream_proxy_config_created_at"` + UpdatedAt int64 `db:"upstream_proxy_config_updated_at"` + CreatedBy int64 `db:"upstream_proxy_config_created_by"` + UpdatedBy int64 `db:"upstream_proxy_config_updated_by"` +} + +type upstreamProxyDB struct { + ID int64 `db:"id"` + RegistryID int64 `db:"registry_id"` + RepoKey string `db:"repo_key"` + ParentID string `db:"parent_id"` + PackageType artifact.PackageType `db:"package_type"` + AllowedPattern sql.NullString `db:"allowed_pattern"` + BlockedPattern sql.NullString `db:"blocked_pattern"` + Source string `db:"source"` + RepoURL string `db:"repo_url"` + RepoAuthType string `db:"repo_auth_type"` + UserName string `db:"user_name"` + SecretIdentifier string `db:"secret_identifier"` + SecretSpaceID int `db:"secret_space_id"` + Token string `db:"token"` + CreatedAt int64 `db:"created_at"` + UpdatedAt int64 `db:"updated_at"` + CreatedBy sql.NullInt64 `db:"created_by"` + UpdatedBy sql.NullInt64 `db:"updated_by"` +} + +func getUpstreamProxyQuery() squirrel.SelectBuilder { + return databaseg.Builder.Select( + " u.upstream_proxy_config_id as id," + + " r.registry_id as registry_id," + + " r.registry_name as repo_key," + + " r.registry_parent_id as parent_id," + + " r.registry_package_type as package_type," + + " r.registry_allowed_pattern as allowed_pattern," + + " r.registry_blocked_pattern as blocked_pattern," + + " u.upstream_proxy_config_url as repo_url," + + " u.upstream_proxy_config_source as source," + + " u.upstream_proxy_config_auth_type as repo_auth_type," + + " u.upstream_proxy_config_user_name as user_name," + + " u.upstream_proxy_config_secret_identifier as secret_identifier," + + " u.upstream_proxy_config_secret_space_id as secret_space_id," + + " u.upstream_proxy_config_token as token," + + " r.registry_created_at as created_at," + + " r.registry_updated_at as updated_at "). + From("registries r "). + LeftJoin("upstream_proxy_configs u ON r.registry_id = u.upstream_proxy_config_registry_id ") +} + +func (r UpstreamproxyDao) Get(ctx context.Context, id int64) (upstreamProxy *types.UpstreamProxy, err error) { + q := getUpstreamProxyQuery() + q = q.Where("r.registry_id = ? AND r.registry_type = 'UPSTREAM'", id) + + sql, args, err := q.ToSql() + if err != nil { + return nil, errors.Wrap(err, "Failed to convert query to sql") + } + + db := dbtx.GetAccessor(ctx, r.db) + + dst := new(upstreamProxyDB) + if err = db.GetContext(ctx, dst, sql, args...); err != nil { + return nil, databaseg.ProcessSQLErrorf(ctx, err, "Failed to get tag detail") + } + + return r.mapToUpstreamProxy(ctx, dst) +} + +func (r UpstreamproxyDao) GetByRegistryIdentifier( + ctx context.Context, + parentID int64, + repoKey string, +) (upstreamProxy *types.UpstreamProxy, err error) { + q := getUpstreamProxyQuery() + q = q.Where("r.registry_parent_id = ? AND r.registry_name = ? 
AND r.registry_type = 'UPSTREAM'", + parentID, repoKey) + + sql, args, err := q.ToSql() + if err != nil { + return nil, errors.Wrap(err, "Failed to convert query to sql") + } + + db := dbtx.GetAccessor(ctx, r.db) + + dst := new(upstreamProxyDB) + if err = db.GetContext(ctx, dst, sql, args...); err != nil { + return nil, databaseg.ProcessSQLErrorf(ctx, err, "Failed to get tag detail") + } + + return r.mapToUpstreamProxy(ctx, dst) +} + +func (r UpstreamproxyDao) GetByParentID(ctx context.Context, parentID string) ( + upstreamProxies *[]types.UpstreamProxy, err error) { + q := getUpstreamProxyQuery() + q = q.Where("r.registry_parent_id = ? AND r.registry_type = 'UPSTREAM'", + parentID) + + sql, args, err := q.ToSql() + if err != nil { + return nil, errors.Wrap(err, "Failed to convert query to sql") + } + + db := dbtx.GetAccessor(ctx, r.db) + + dst := []*upstreamProxyDB{} + if err = db.SelectContext(ctx, &dst, sql, args...); err != nil { + return nil, databaseg.ProcessSQLErrorf(ctx, err, "Failed to get tag detail") + } + + return r.mapToUpstreamProxyList(ctx, dst) +} + +func (r UpstreamproxyDao) Create( + ctx context.Context, + upstreamproxyRecord *types.UpstreamProxyConfig, +) (id int64, err error) { + const sqlQuery = ` + INSERT INTO upstream_proxy_configs ( + upstream_proxy_config_registry_id + ,upstream_proxy_config_source + ,upstream_proxy_config_url + ,upstream_proxy_config_auth_type + ,upstream_proxy_config_user_name + ,upstream_proxy_config_secret_identifier + ,upstream_proxy_config_secret_space_id + ,upstream_proxy_config_token + ,upstream_proxy_config_created_at + ,upstream_proxy_config_updated_at + ,upstream_proxy_config_created_by + ,upstream_proxy_config_updated_by + ) VALUES ( + :upstream_proxy_config_registry_id + ,:upstream_proxy_config_source + ,:upstream_proxy_config_url + ,:upstream_proxy_config_auth_type + ,:upstream_proxy_config_user_name + ,:upstream_proxy_config_secret_identifier + ,:upstream_proxy_config_secret_space_id + ,:upstream_proxy_config_token + ,:upstream_proxy_config_created_at + ,:upstream_proxy_config_updated_at + ,:upstream_proxy_config_created_by + ,:upstream_proxy_config_updated_by + ) RETURNING upstream_proxy_config_registry_id` + + db := dbtx.GetAccessor(ctx, r.db) + query, arg, err := db.BindNamed(sqlQuery, r.mapToInternalUpstreamProxy(ctx, upstreamproxyRecord)) + if err != nil { + return -1, databaseg.ProcessSQLErrorf(ctx, + err, "Failed to bind upstream proxy object") + } + + if err = db.QueryRowContext(ctx, query, arg...).Scan(&upstreamproxyRecord.ID); err != nil { + return -1, databaseg.ProcessSQLErrorf(ctx, err, "Insert query failed") + } + + return upstreamproxyRecord.ID, nil +} + +func (r UpstreamproxyDao) Delete(ctx context.Context, parentID int64, repoKey string) (err error) { + stmt := databaseg.Builder.Delete("upstream_proxy_configs"). + Where("upstream_proxy_config_registry_id in (SELECT registry_id from registries"+ + " WHERE registry_parent_id = ? AND registry_name = ?)", parentID, repoKey) + + sql, args, err := stmt.ToSql() + if err != nil { + return fmt.Errorf("failed to convert purge registry query to sql: %w", err) + } + + db := dbtx.GetAccessor(ctx, r.db) + + _, err = db.ExecContext(ctx, sql, args...) 
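+	// The IN-subquery above resolves the registry by (parent, name) inline, so the
+	// delete needs no separate lookup round trip.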
+	if err != nil {
+		return databaseg.ProcessSQLErrorf(ctx, err, "the delete query failed")
+	}
+
+	return nil
+}
+
+func (r UpstreamproxyDao) Update(
+	ctx context.Context,
+	upstreamProxyRecord *types.UpstreamProxyConfig,
+) (err error) {
+	var sqlQuery = " UPDATE upstream_proxy_configs SET " +
+		util.GetSetDBKeys(upstreamProxyConfigDB{}, "upstream_proxy_config_id") +
+		" WHERE upstream_proxy_config_id = :upstream_proxy_config_id "
+
+	upstreamProxy := r.mapToInternalUpstreamProxy(ctx, upstreamProxyRecord)
+
+	// update Version (used for optimistic locking) and Updated time
+	upstreamProxy.UpdatedAt = time.Now().UnixMilli()
+
+	db := dbtx.GetAccessor(ctx, r.db)
+
+	query, arg, err := db.BindNamed(sqlQuery, upstreamProxy)
+	if err != nil {
+		return databaseg.ProcessSQLErrorf(ctx, err, "Failed to bind upstream proxy config object")
+	}
+
+	result, err := db.ExecContext(ctx, query, arg...)
+	if err != nil {
+		return databaseg.ProcessSQLErrorf(ctx, err, "Failed to update upstream proxy config")
+	}
+
+	count, err := result.RowsAffected()
+	if err != nil {
+		return databaseg.ProcessSQLErrorf(ctx, err, "Failed to get number of updated rows")
+	}
+
+	if count == 0 {
+		return gitness_store.ErrVersionConflict
+	}
+
+	return nil
+}
+
+func (r UpstreamproxyDao) GetAll(
+	ctx context.Context, parentID int64,
+	packageTypes []string, sortByField string, sortByOrder string, limit int,
+	offset int, search string,
+) (upstreamProxies *[]types.UpstreamProxy, err error) {
+	q := getUpstreamProxyQuery()
+	q = q.Where("r.registry_parent_id = ? AND r.registry_type = 'UPSTREAM'",
+		parentID)
+
+	if search != "" {
+		q = q.Where("r.registry_name LIKE ?", sqlPartialMatch(search))
+	}
+
+	if len(packageTypes) > 0 {
+		// squirrel renders Eq with a slice as an IN (...) clause; squirrel also
+		// joins Where clauses with AND, so no literal "AND" belongs in the clause.
+		q = q.Where(squirrel.Eq{"r.registry_package_type": packageTypes})
+	}
+
+	q = q.OrderBy(" r.registry_" + sortByField + " " + sortByOrder).Limit(uint64(limit)).Offset(uint64(offset))
+
+	sql, args, err := q.ToSql()
+	if err != nil {
+		return nil, errors.Wrap(err, "Failed to convert query to sql")
+	}
+
+	db := dbtx.GetAccessor(ctx, r.db)
+
+	dst := []*upstreamProxyDB{}
+	if err = db.SelectContext(ctx, &dst, sql, args...); err != nil {
+		return nil, databaseg.ProcessSQLErrorf(ctx, err, "Failed to list upstream proxies")
+	}
+	return r.mapToUpstreamProxyList(ctx, dst)
+}
+
+func (r UpstreamproxyDao) CountAll(
+	ctx context.Context, parentID string,
+	packageTypes []string, search string,
+) (count int64, err error) {
+	q := databaseg.Builder.Select(" COUNT(*) ").
+		From(" registries r").
+		LeftJoin(" upstream_proxy_configs u ON r.registry_id = u.upstream_proxy_config_registry_id ").
+		Where("r.registry_parent_id = ? AND r.registry_type = 'UPSTREAM'",
+			parentID)
+
+	if search != "" {
+		q = q.Where("r.registry_name LIKE ?", sqlPartialMatch(search))
+	}
+
+	if len(packageTypes) > 0 {
+		q = q.Where(" AND r.registry_package_type in ? 
", packageTypes) + } + + sql, args, err := q.ToSql() + if err != nil { + return -1, errors.Wrap(err, "Failed to convert query to sql") + } + + db := dbtx.GetAccessor(ctx, r.db) + + var total int64 + if err = db.QueryRowContext(ctx, sql, args...).Scan(&total); err != nil { + return -1, databaseg.ProcessSQLErrorf(ctx, err, "Failed to get upstream proxy count") + } + return total, nil +} + +func (r UpstreamproxyDao) mapToInternalUpstreamProxy( + ctx context.Context, + in *types.UpstreamProxyConfig, +) *upstreamProxyConfigDB { + if in.CreatedAt.IsZero() { + in.CreatedAt = time.Now() + } + in.UpdatedAt = time.Now() + session, _ := request.AuthSessionFrom(ctx) + if in.CreatedBy == 0 { + in.CreatedBy = session.Principal.ID + } + in.UpdatedBy = session.Principal.ID + + return &upstreamProxyConfigDB{ + ID: in.ID, + RegistryID: in.RegistryID, + Source: in.Source, + URL: in.URL, + AuthType: in.AuthType, + UserName: in.UserName, + SecretIdentifier: in.SecretIdentifier, + SecretSpaceID: in.SecretSpaceID, + Token: in.Token, + CreatedAt: in.CreatedAt.UnixMilli(), + UpdatedAt: in.UpdatedAt.UnixMilli(), + CreatedBy: in.CreatedBy, + UpdatedBy: in.UpdatedBy, + } +} + +func (r UpstreamproxyDao) mapToUpstreamProxy( + _ context.Context, + dst *upstreamProxyDB, +) (*types.UpstreamProxy, error) { + createdBy := int64(-1) + updatedBy := int64(-1) + if dst.CreatedBy.Valid { + createdBy = dst.CreatedBy.Int64 + } + if dst.UpdatedBy.Valid { + updatedBy = dst.UpdatedBy.Int64 + } + return &types.UpstreamProxy{ + ID: dst.ID, + RegistryID: dst.RegistryID, + RepoKey: dst.RepoKey, + ParentID: dst.ParentID, + PackageType: dst.PackageType, + AllowedPattern: util.StringToArr(dst.AllowedPattern.String), + BlockedPattern: util.StringToArr(dst.BlockedPattern.String), + Source: dst.Source, + RepoURL: dst.RepoURL, + RepoAuthType: dst.RepoAuthType, + UserName: dst.UserName, + SecretIdentifier: dst.SecretIdentifier, + SecretSpaceID: dst.SecretSpaceID, + Token: dst.Token, + CreatedAt: time.UnixMilli(dst.CreatedAt), + UpdatedAt: time.UnixMilli(dst.UpdatedAt), + CreatedBy: createdBy, + UpdatedBy: updatedBy, + }, nil +} + +func (r UpstreamproxyDao) mapToUpstreamProxyList( + ctx context.Context, + dst []*upstreamProxyDB, +) (*[]types.UpstreamProxy, error) { + upstreamProxies := make([]types.UpstreamProxy, 0, len(dst)) + for _, d := range dst { + upstreamProxy, err := r.mapToUpstreamProxy(ctx, d) + if err != nil { + return nil, err + } + upstreamProxies = append(upstreamProxies, *upstreamProxy) + } + return &upstreamProxies, nil +} diff --git a/registry/app/store/database/util/errors.go b/registry/app/store/database/util/errors.go new file mode 100644 index 000000000..8c8ffb8fc --- /dev/null +++ b/registry/app/store/database/util/errors.go @@ -0,0 +1,42 @@ +// Copyright 2023 Harness, Inc. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package util + +import ( + "errors" + "fmt" +) + +var ( + // ErrNotFound is returned when a row is not found on the metadata database. 
+ ErrNotFound = errors.New("not found") + // ErrManifestNotFound is returned when a manifest is not found on the metadata database. + ErrManifestNotFound = fmt.Errorf("manifest %w", ErrNotFound) + // ErrRefManifestNotFound is returned when a manifest referenced by a list/index is not found on the metadata database. + ErrRefManifestNotFound = fmt.Errorf("referenced %w", ErrManifestNotFound) + // ErrManifestReferencedInList is returned when attempting to delete a manifest referenced in at least one list. + ErrManifestReferencedInList = errors.New("manifest referenced by manifest list") +) + +// UnknownMediaTypeError is returned when attempting to save a manifest containing references with unknown media types. +type UnknownMediaTypeError struct { + // MediaType is the offending media type + MediaType string +} + +// Error implements error. +func (err UnknownMediaTypeError) Error() string { + return fmt.Sprintf("unknown media type: %s", err.MediaType) +} diff --git a/registry/app/store/database/util/mapper.go b/registry/app/store/database/util/mapper.go new file mode 100644 index 000000000..f2d60f330 --- /dev/null +++ b/registry/app/store/database/util/mapper.go @@ -0,0 +1,114 @@ +// Copyright 2023 Harness, Inc. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
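+
+// Illustrative round trip (the names mirror the helpers defined below): callers
+// persist string slices as a single "^_"-delimited value, e.g.
+//
+//	s := ArrToString([]string{"prod", "team-a"}) // "prod^_team-a"
+//	parts := StringToArr(s)                      // []string{"prod", "team-a"}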
+ +package util + +import ( + "encoding/hex" + "reflect" + "strconv" + "strings" + + "github.com/harness/gitness/registry/app/pkg/commons" +) + +const ID = "" +const separator = "^_" + +func StringToArr(s string) []string { + return StringToArrByDelimiter(s, separator) +} + +func ArrToString(arr []string) string { + return ArrToStringByDelimiter(arr, separator) +} + +func Int64ArrToString(arr []int64) string { + return Int64ArrToStringByDelimiter(arr, separator) +} + +func StringToInt64Arr(s string) []int64 { + return StringToInt64ArrByDelimiter(s, separator) +} + +func StringToArrByDelimiter(s string, delimiter string) []string { + var arr []string + if commons.IsEmpty(s) { + return arr + } + return strings.Split(s, delimiter) +} + +func ArrToStringByDelimiter(arr []string, delimiter string) string { + return strings.Join(arr, delimiter) +} + +func Int64ArrToStringByDelimiter(arr []int64, delimiter string) string { + var s []string + for _, i := range arr { + s = append(s, strconv.FormatInt(i, 10)) + } + return strings.Join(s, delimiter) +} + +func StringToInt64ArrByDelimiter(s string, delimiter string) []int64 { + var arr []int64 + if commons.IsEmpty(s) { + return arr + } + for _, i := range strings.Split(s, delimiter) { + j, _ := strconv.ParseInt(i, 10, 64) + arr = append(arr, j) + } + return arr +} + +func GetSetDBKeys(s interface{}, ignoreKeys ...string) string { + keys := GetDBTagsFromStruct(s) + filteredKeys := make([]string, 0) + +keysLoop: + for _, key := range keys { + for _, ignoreKey := range ignoreKeys { + if key == ignoreKey { + continue keysLoop + } + } + filteredKeys = append(filteredKeys, key+" = :"+key) + } + return strings.Join(filteredKeys, ", ") +} + +func GetDBTagsFromStruct(s interface{}) []string { + var tags []string + rt := reflect.TypeOf(s) + + for i := 0; i < rt.NumField(); i++ { + field := rt.Field(i) + dbTag := field.Tag.Get("db") + if dbTag != "" { + tags = append(tags, dbTag) + } + } + + return tags +} + +func GetHexDecodedBytes(s string) ([]byte, error) { + return hex.DecodeString(s) +} + +func GetHexEncodedString(b []byte) string { + return hex.EncodeToString(b) +} diff --git a/registry/app/store/database/wire.go b/registry/app/store/database/wire.go new file mode 100644 index 000000000..f40b25c6b --- /dev/null +++ b/registry/app/store/database/wire.go @@ -0,0 +1,87 @@ +// Copyright 2023 Harness, Inc. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
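+
+// Illustrative only: WireSet below is meant to be consumed by a Google Wire
+// injector elsewhere in the tree, e.g.
+//
+//	wire.Build(database.WireSet /*, other provider sets */)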
+ +package database + +import ( + "github.com/harness/gitness/registry/app/store" + "github.com/harness/gitness/store/database/dbtx" + + "github.com/google/wire" + "github.com/jmoiron/sqlx" +) + +func ProvideUpstreamDao(db *sqlx.DB, + registryDao store.RegistryRepository) store.UpstreamProxyConfigRepository { + return NewUpstreamproxyDao(db, registryDao) +} + +func ProvideRepoDao(db *sqlx.DB, mtRepository store.MediaTypesRepository) store.RegistryRepository { + return NewRegistryDao(db, mtRepository) +} + +func ProvideMediaTypeDao(db *sqlx.DB) store.MediaTypesRepository { + return NewMediaTypesDao(db) +} + +func ProvideBlobDao(db *sqlx.DB, mtRepository store.MediaTypesRepository) store.BlobRepository { + return NewBlobDao(db, mtRepository) +} + +func ProvideRegistryBlobDao(db *sqlx.DB) store.RegistryBlobRepository { + return NewRegistryBlobDao(db) +} + +func ProvideArtifactDao(db *sqlx.DB) store.ArtifactRepository { + return NewArtifactDao(db) +} + +func ProvideArtifactStatDao(db *sqlx.DB) store.ArtifactStatRepository { + return NewArtifactStatDao(db) +} + +func ProvideTagDao(db *sqlx.DB) store.TagRepository { + return NewTagDao(db) +} + +func ProvideManifestDao(sqlDB *sqlx.DB, mtRepository store.MediaTypesRepository) store.ManifestRepository { + return NewManifestDao(sqlDB, mtRepository) +} + +func ProvideManifestRefDao(db *sqlx.DB) store.ManifestReferenceRepository { + return NewManifestReferenceDao(db) +} + +func ProvideLayerDao(db *sqlx.DB, mtRepository store.MediaTypesRepository) store.LayerRepository { + return NewLayersDao(db, mtRepository) +} + +func ProvideCleanupPolicyDao(db *sqlx.DB, tx dbtx.Transactor) store.CleanupPolicyRepository { + return NewCleanupPolicyDao(db, tx) +} + +var WireSet = wire.NewSet( + ProvideUpstreamDao, + ProvideRepoDao, + ProvideMediaTypeDao, + ProvideBlobDao, + ProvideRegistryBlobDao, + ProvideTagDao, + ProvideManifestDao, + ProvideCleanupPolicyDao, + ProvideManifestRefDao, + ProvideLayerDao, + ProvideArtifactDao, + ProvideArtifactStatDao, +) diff --git a/registry/app/store/migrations/files/postgres/000001_init.down.sql b/registry/app/store/migrations/files/postgres/000001_init.down.sql new file mode 100644 index 000000000..e69de29bb diff --git a/registry/app/store/migrations/files/postgres/000001_init.up.sql b/registry/app/store/migrations/files/postgres/000001_init.up.sql new file mode 100644 index 000000000..88433119f --- /dev/null +++ b/registry/app/store/migrations/files/postgres/000001_init.up.sql @@ -0,0 +1,561 @@ +create table registries +( + registry_id SERIAL primary key, + registry_name text not null + constraint registry_name_len_check + check (length(registry_name) <= 255), + registry_root_parent_id INTEGER not null, + registry_parent_id INTEGER not null, + registry_description text, + registry_type text not null, + registry_package_type text not null, + registry_upstream_proxies text, + registry_allowed_pattern text, + registry_blocked_pattern text, + registry_created_at BIGINT not null, + registry_updated_at BIGINT not null, + registry_created_by INTEGER not null, + registry_updated_by INTEGER not null, + registry_labels text, + constraint unique_registries + unique (registry_root_parent_id, registry_name) +); + + +create table media_types +( + mt_id SERIAL primary key, + mt_media_type text not null + constraint unique_media_types_type + unique, + mt_created_at BIGINT NOT NULL DEFAULT (EXTRACT(EPOCH FROM now()) * 1000)::BIGINT +); + +create table blobs +( + blob_id SERIAL primary key, + blob_root_parent_id INTEGER not null, + blob_digest 
bytea not null, + blob_media_type_id INTEGER not null + constraint fk_blobs_media_type_id_media_types + references media_types(mt_id), + blob_size BIGINT not null, + blob_created_at BIGINT not null, + blob_created_by INTEGER not null, + constraint unique_digest_root_parent_id unique (blob_digest, blob_root_parent_id) +); + +create index index_blobs_on_media_type_id + on blobs (blob_media_type_id); + +create table registry_blobs +( + rblob_id SERIAL primary key, + rblob_registry_id INTEGER not null + constraint fk_registry_blobs_rpstry_id_registries + references registries + on delete cascade, + rblob_blob_id INTEGER not null + constraint fk_registry_blobs_blob_id_blobs + references blobs + on delete cascade, + rblob_image_name text + constraint registry_blobs_image_len_check + check (length(rblob_image_name) <= 255), + rblob_created_at BIGINT not null, + rblob_updated_at BIGINT not null, + rblob_created_by INTEGER not null, + rblob_updated_by INTEGER not null, + + constraint unique_registry_blobs_registry_id_blob_id_image + unique (rblob_registry_id, rblob_blob_id, rblob_image_name) +); + +create index index_registry_blobs_on_reg_id + on registry_blobs (rblob_registry_id); + +create index index_registry_blobs_on_reg_blob_id + on registry_blobs (rblob_registry_id, rblob_blob_id); + +create table manifests +( + manifest_id SERIAL primary key, + manifest_registry_id INTEGER not null + constraint fk_manifests_registry_id_registries + references registries(registry_id) + on delete cascade, + manifest_schema_version smallint not null, + manifest_media_type_id INTEGER not null + constraint fk_manifests_media_type_id_media_types + references media_types(mt_id), + manifest_artifact_media_type text, + manifest_total_size BIGINT not null, + manifest_configuration_media_type text, + manifest_configuration_payload bytea, + manifest_configuration_blob_id INTEGER + constraint fk_manifests_configuration_blob_id_blobs + references blobs(blob_id), + manifest_configuration_digest bytea, + manifest_digest bytea not null, + manifest_payload bytea not null, + manifest_non_conformant boolean default false, + manifest_non_distributable_layers boolean default false, + manifest_subject_id INTEGER, + manifest_subject_digest bytea, + manifest_annotations bytea, + manifest_image_name text not null + constraint manifests_img_name_len_check + check (length(manifest_image_name) <= 255), + manifest_created_at BIGINT not null, + manifest_created_by INTEGER not null, + manifest_updated_at BIGINT not null, + manifest_updated_by INTEGER not null, + constraint unique_manifests_registry_id_image_name_and_digest + unique (manifest_registry_id, manifest_image_name, manifest_digest), + constraint unique_manifests_registry_id_id_cfg_blob_id + unique (manifest_registry_id, manifest_id, manifest_configuration_blob_id), + constraint fk_manifests_subject_id_manifests + foreign key (manifest_subject_id) references manifests + on delete cascade +); + +create index index_manifests_on_media_type_id + on manifests (manifest_media_type_id); + +create index index_manifests_on_configuration_blob_id + on manifests (manifest_configuration_blob_id); + +create table manifest_references +( + manifest_ref_id SERIAL primary key, + manifest_ref_registry_id INTEGER not null, + manifest_ref_parent_id INTEGER not null, + manifest_ref_child_id INTEGER not null, + manifest_ref_created_at BIGINT not null, + manifest_ref_updated_at BIGINT not null, + manifest_ref_created_by INTEGER not null, + manifest_ref_updated_by INTEGER not null, + constraint 
unique_manifest_references_prt_id_chd_id + unique (manifest_ref_registry_id, manifest_ref_parent_id, manifest_ref_child_id), + constraint fk_manifest_references_parent_id_mnfsts + foreign key (manifest_ref_parent_id) references manifests + on delete cascade, + constraint fk_manifest_references_child_id_mnfsts + foreign key (manifest_ref_child_id) references manifests, + constraint check_manifest_references_parent_id_and_child_id_differ + check (manifest_ref_parent_id <> manifest_ref_child_id) +); + +create index index_manifest_references_on_rpstry_id_child_id + on manifest_references (manifest_ref_registry_id, manifest_ref_child_id); + +create table layers +( + layer_id SERIAL primary key, + layer_registry_id INTEGER not null, + layer_manifest_id INTEGER not null, + layer_media_type_id INTEGER not null + constraint fk_layer_media_type_id_media_types + references media_types, + layer_blob_id INTEGER not null + constraint fk_layer_blob_id_blobs + references blobs, + layer_size BIGINT not null, + layer_created_at BIGINT not null, + layer_updated_at BIGINT not null, + layer_created_by INTEGER not null, + layer_updated_by INTEGER not null, + constraint unique_layer_rpstry_id_and_mnfst_id_and_blob_id + unique (layer_registry_id, layer_manifest_id, layer_blob_id), + constraint unique_layer_rpstry_id_and_id_and_blob_id + unique (layer_registry_id, layer_id, layer_blob_id), + constraint fk_manifst_id_manifests + foreign key (layer_manifest_id) references manifests(manifest_id) + on delete cascade +); + +create index index_layer_on_media_type_id + on layers (layer_media_type_id); + +create index index_layer_on_blob_id + on layers (layer_blob_id); + +create table artifacts +( + artifact_id SERIAL primary key, + artifact_name text not null, + artifact_registry_id INTEGER not null + constraint fk_registries_registry_id + references registries(registry_id) + on delete cascade, + artifact_labels text, + artifact_enabled boolean default false, + artifact_created_at BIGINT, + artifact_updated_at BIGINT, + artifact_created_by INTEGER, + artifact_updated_by INTEGER, + constraint unique_artifact_registry_id_and_name unique (artifact_registry_id, artifact_name), + constraint check_artifact_name_length check ((char_length(artifact_name) <= 255)) +); + +create index index_artifact_on_registry_id ON artifacts USING btree (artifact_registry_id); + + +create table artifact_stats +( + artifact_stat_id SERIAL primary key, + artifact_stat_artifact_id INTEGER not null + constraint fk_artifacts_artifact_id + references artifacts(artifact_id) on delete cascade, + artifact_stat_date BIGINT, + artifact_stat_download_count BIGINT, + artifact_stat_upload_bytes BIGINT, + artifact_stat_download_bytes BIGINT, + artifact_stat_created_at BIGINT not null, + artifact_stat_updated_at BIGINT not null, + artifact_stat_created_by INTEGER not null, + artifact_stat_updated_by INTEGER not null, + constraint unique_artifact_stats_artifact_id_and_date unique (artifact_stat_artifact_id, artifact_stat_date) +); + +create table tags +( + tag_id SERIAL primary key, + tag_name text not null + constraint tag_name_len_check + check (char_length(tag_name) <= 128), + tag_image_name text not null + constraint tag_img_name_len_check + check (length(tag_image_name) <= 255), + tag_registry_id INTEGER not null, + tag_manifest_id INTEGER not null, + tag_created_at BIGINT, + tag_updated_at BIGINT, + tag_created_by INTEGER, + tag_updated_by INTEGER, + constraint fk_tag_manifest_id_manifests FOREIGN KEY +(tag_manifest_id) REFERENCES manifests (manifest_id) 
ON DELETE CASCADE, + constraint unique_tag_registry_id_and_name_and_image_name + unique (tag_registry_id, tag_name, tag_image_name) +); + +create index index_tag_on_rpository_id_and_manifest_id + on tags (tag_registry_id, tag_manifest_id); + +create table upstream_proxy_configs +( + upstream_proxy_config_id SERIAL primary key, + upstream_proxy_config_registry_id INTEGER not null + constraint fk_upstream_proxy_config_registry_id + references registries + on delete cascade, + upstream_proxy_config_source text, + upstream_proxy_config_url text, + upstream_proxy_config_auth_type text not null, + upstream_proxy_config_user_name text, + upstream_proxy_config_secret_identifier text, + upstream_proxy_config_secret_space_id INTEGER, + constraint fk_layers_secret_identifier_and_secret_space_id + foreign key (upstream_proxy_config_secret_identifier, upstream_proxy_config_secret_space_id) + references secrets(secret_uid, secret_space_id) + on delete cascade, + upstream_proxy_config_token text, + upstream_proxy_config_created_at BIGINT, + upstream_proxy_config_updated_at BIGINT, + upstream_proxy_config_created_by INTEGER, + upstream_proxy_config_updated_by INTEGER +); + +create index index_upstream_proxy_config_on_registry_id + on upstream_proxy_configs (upstream_proxy_config_registry_id); + +create table cleanup_policies +( + cp_id SERIAL primary key, + cp_registry_id INTEGER not null + constraint fk_cleanup_policies_registry_id + references registries ON DELETE CASCADE, + cp_name text, + cp_expiry_time_ms BIGINT, + cp_created_at BIGINT not null, + cp_updated_at BIGINT not null, + cp_created_by INTEGER not null, + cp_updated_by INTEGER not null +); + +create index index_cleanup_policies_on_registry_id + on cleanup_policies (cp_registry_id); + +create table cleanup_policy_prefix_mappings +( + cpp_id SERIAL primary key, + cpp_cleanup_policy_id INTEGER not null + constraint fk_cleanup_policies_id + references cleanup_policies(cp_id) ON DELETE CASCADE, + cpp_prefix text not null, + cpp_prefix_type text not null +); + +create index index_cleanup_policy_map_on_policy_id + on cleanup_policy_prefix_mappings (cpp_cleanup_policy_id); + +insert into media_types (mt_media_type) +values ('application/vnd.docker.distribution.manifest.v1+json'), + ('application/vnd.docker.distribution.manifest.v1+prettyjws'), + ('application/vnd.docker.distribution.manifest.v2+json'), + ('application/vnd.docker.distribution.manifest.list.v2+json'), + ('application/vnd.docker.image.rootfs.diff.tar'), + ('application/vnd.docker.image.rootfs.diff.tar.gzip'), + ('application/vnd.docker.image.rootfs.foreign.diff.tar.gzip'), + ('application/vnd.docker.container.image.v1+json'), + ('application/vnd.docker.container.image.rootfs.diff+x-gtar'), + ('application/vnd.docker.plugin.v1+json'), + ('application/vnd.oci.image.layer.v1.tar'), + ('application/vnd.oci.image.layer.v1.tar+gzip'), + ('application/vnd.oci.image.layer.v1.tar+zstd'), + ('application/vnd.oci.image.layer.nondistributable.v1.tar'), + ('application/vnd.oci.image.layer.nondistributable.v1.tar+gzip'), + ('application/vnd.oci.image.config.v1+json'), + ('application/vnd.oci.image.manifest.v1+json'), + ('application/vnd.oci.image.index.v1+json'), + ('application/vnd.cncf.helm.config.v1+json'), + ('application/tar+gzip'), + ('application/octet-stream'), + ('application/vnd.buildkit.cacheconfig.v0'), + ('application/vnd.cncf.helm.chart.content.v1.tar+gzip'), + ('application/vnd.cncf.helm.chart.provenance.v1.prov'); + + +CREATE TABLE gc_blob_review_queue +( + blob_id INTEGER NOT NULL, + 
review_after BIGINT NOT NULL DEFAULT (EXTRACT(EPOCH FROM (NOW() + INTERVAL '1 day'))), + review_count INTEGER NOT NULL DEFAULT 0, + created_at BIGINT NOT NULL DEFAULT EXTRACT(EPOCH FROM NOW()), + event text NOT NULL, + CONSTRAINT pk_gc_blob_review_queue primary key (blob_id) +); + +CREATE INDEX index_gc_blob_review_queue_on_review_after ON gc_blob_review_queue USING btree (review_after); + +CREATE TABLE gc_review_after_defaults +( + event text NOT NULL, + value interval NOT NULL, + CONSTRAINT pk_gc_review_after_defaults PRIMARY KEY (event), + CONSTRAINT check_gc_review_after_defaults_event_length CHECK ((char_length(event) <= 255)) +); + +INSERT INTO gc_review_after_defaults (event, value) +VALUES ('blob_upload', interval '1 day'), + ('manifest_upload', interval '1 day'), + ('manifest_delete', interval '1 day'), + ('layer_delete', interval '1 day'), + ('manifest_list_delete', interval '1 day'), + ('tag_delete', interval '1 day'), + ('tag_switch', interval '1 day') +ON CONFLICT (event) + DO NOTHING; + +CREATE TABLE gc_manifest_review_queue +( + registry_id INTEGER NOT NULL, + manifest_id INTEGER NOT NULL, + review_after BIGINT NOT NULL DEFAULT (EXTRACT(EPOCH FROM (NOW() + INTERVAL '1 day'))), + review_count INTEGER NOT NULL DEFAULT 0, + created_at BIGINT NOT NULL DEFAULT EXTRACT(EPOCH FROM NOW()), + event text NOT NULL, + CONSTRAINT pk_gc_manifest_review_queue PRIMARY KEY (registry_id, manifest_id), + CONSTRAINT fk_gc_manifest_review_queue_rp_id_mfst_id_mnfsts FOREIGN KEY (manifest_id) REFERENCES manifests (manifest_id) ON DELETE CASCADE +); + +CREATE INDEX index_gc_manifest_review_queue_on_review_after ON gc_manifest_review_queue USING btree (review_after); + +CREATE OR REPLACE FUNCTION gc_review_after(e text) + RETURNS BIGINT + VOLATILE +AS +$$ +DECLARE + result timestamp WITH time zone; +BEGIN + SELECT (now() + value) + INTO result + FROM gc_review_after_defaults + WHERE event = e; + IF result IS NULL THEN + RETURN EXTRACT(EPOCH FROM (now() + interval '1 day')); + ELSE + RETURN EXTRACT(EPOCH FROM result); + END IF; +END; +$$ + LANGUAGE plpgsql; + +CREATE OR REPLACE FUNCTION gc_track_blob_uploads() + RETURNS TRIGGER +AS +$$ +BEGIN + INSERT INTO gc_blob_review_queue (blob_id, review_after, event) + VALUES (NEW.blob_id, gc_review_after('blob_upload'), 'blob_upload') + ON CONFLICT (blob_id) + DO UPDATE SET review_after = gc_review_after('blob_upload'), + event = 'blob_upload'; + RETURN NULL; +END; +$$ + LANGUAGE plpgsql; + +CREATE TRIGGER gc_track_blob_uploads_trigger + AFTER INSERT + ON blobs + FOR EACH ROW +EXECUTE PROCEDURE public.gc_track_blob_uploads(); + +CREATE OR REPLACE FUNCTION gc_track_manifest_uploads() + RETURNS TRIGGER +AS +$$ +BEGIN + INSERT INTO gc_manifest_review_queue (registry_id, manifest_id, review_after, event) + VALUES (NEW.manifest_registry_id, NEW.manifest_id, gc_review_after('manifest_upload'), 'manifest_upload'); + RETURN NULL; +END; +$$ + LANGUAGE plpgsql; + +CREATE TRIGGER gc_track_manifest_uploads_trigger + AFTER INSERT + ON manifests + FOR EACH ROW +EXECUTE PROCEDURE gc_track_manifest_uploads(); + +CREATE OR REPLACE FUNCTION gc_track_deleted_manifests() + RETURNS TRIGGER +AS +$$ +BEGIN + IF OLD.manifest_configuration_blob_id IS NOT NULL THEN -- not all manifests have a configuration +INSERT INTO gc_blob_review_queue (blob_id, review_after, event) +VALUES (OLD.manifest_configuration_blob_id, gc_review_after('manifest_delete'), 'manifest_delete') +ON CONFLICT (blob_id) + DO UPDATE SET + review_after = gc_review_after('manifest_delete'), + event = 
'manifest_delete'; +END IF; +RETURN NULL; +END; +$$ + LANGUAGE plpgsql; + +CREATE OR REPLACE FUNCTION gc_track_deleted_layers() + RETURNS TRIGGER +AS +$$ +BEGIN + IF (TG_LEVEL = 'STATEMENT') THEN + INSERT INTO gc_blob_review_queue (blob_id, review_after, event) + SELECT deleted_rows.layer_blob_id, + gc_review_after('layer_delete'), + 'layer_delete' + FROM old_table deleted_rows + JOIN + blobs b ON deleted_rows.layer_blob_id = b.blob_id + ORDER BY deleted_rows.layer_blob_id ASC + ON CONFLICT (blob_id) + DO UPDATE SET review_after = gc_review_after('layer_delete'), + event = 'layer_delete'; + ELSIF (TG_LEVEL = 'ROW') THEN + INSERT INTO gc_blob_review_queue (blob_id, review_after, event) + VALUES (OLD.blob_id, gc_review_after('layer_delete'), 'layer_delete') + ON CONFLICT (blob_id) + DO UPDATE SET review_after = gc_review_after('layer_delete'), + event = 'layer_delete'; + END IF; + RETURN NULL; +END; +$$ + LANGUAGE plpgsql; + +CREATE TRIGGER gc_track_deleted_manifests_trigger + AFTER DELETE + ON manifests + FOR EACH ROW +EXECUTE PROCEDURE gc_track_deleted_manifests(); + +CREATE TRIGGER gc_track_deleted_layers_trigger + AFTER DELETE + ON layers + REFERENCING OLD TABLE AS old_table + FOR EACH STATEMENT +EXECUTE FUNCTION gc_track_deleted_layers(); + +CREATE OR REPLACE FUNCTION gc_track_deleted_manifest_lists() + RETURNS TRIGGER +AS +$$ +BEGIN + INSERT INTO gc_manifest_review_queue (registry_id, manifest_id, review_after, event) + VALUES (OLD.manifest_ref_registry_id, OLD.manifest_ref_child_id, gc_review_after('manifest_list_delete'), 'manifest_list_delete') + ON CONFLICT (registry_id, manifest_id) + DO UPDATE SET review_after = gc_review_after('manifest_list_delete'), + event = 'manifest_list_delete'; + RETURN NULL; +END; +$$ + LANGUAGE plpgsql; + +CREATE TRIGGER gc_track_deleted_manifest_lists_trigger + AFTER DELETE + ON manifest_references + FOR EACH ROW +EXECUTE PROCEDURE gc_track_deleted_manifest_lists(); + + +CREATE OR REPLACE FUNCTION gc_track_deleted_tags() + RETURNS TRIGGER +AS +$$ +BEGIN + IF EXISTS (SELECT 1 + FROM manifests + WHERE manifest_registry_id = OLD.tag_registry_id + AND manifest_id = OLD.tag_manifest_id) THEN + INSERT INTO gc_manifest_review_queue (registry_id, manifest_id, review_after, event) + VALUES (OLD.tag_registry_id, OLD.tag_manifest_id, gc_review_after('tag_delete'), 'tag_delete') + ON CONFLICT (registry_id, manifest_id) + DO UPDATE SET review_after = gc_review_after('tag_delete'), + event = 'tag_delete'; + END IF; + RETURN NULL; +END; +$$ + LANGUAGE plpgsql; + +CREATE TRIGGER gc_track_deleted_tag_trigger + AFTER DELETE + ON tags + FOR EACH ROW +EXECUTE PROCEDURE gc_track_deleted_tags(); + +CREATE OR REPLACE FUNCTION gc_track_switched_tags() + RETURNS TRIGGER +AS +$$ +BEGIN + INSERT INTO gc_manifest_review_queue (registry_id, manifest_id, review_after, event) + VALUES (OLD.tag_registry_id, OLD.tag_manifest_id, gc_review_after('tag_switch'), 'tag_switch') + ON CONFLICT (registry_id, manifest_id) + DO UPDATE SET review_after = gc_review_after('tag_switch'), + event = 'tag_switch'; + RETURN NULL; +END; +$$ + LANGUAGE plpgsql; + +CREATE TRIGGER gc_track_switched_tag_trigger + AFTER UPDATE OF tag_manifest_id + ON tags + FOR EACH ROW +EXECUTE PROCEDURE gc_track_switched_tags(); \ No newline at end of file diff --git a/registry/app/store/migrations/files/postgres/000001_init.down.sql b/registry/app/store/migrations/files/sqlite/000001_init.down.sql new file mode 100644 index 000000000..e69de29bb diff --git a/registry/app/store/migrations/files/sqlite/000001_init.up.sql
b/registry/app/store/migrations/files/sqlite/000001_init.up.sql new file mode 100644 index 000000000..fe1729845 --- /dev/null +++ b/registry/app/store/migrations/files/sqlite/000001_init.up.sql @@ -0,0 +1,330 @@ +create table registries +( + registry_id INTEGER PRIMARY KEY AUTOINCREMENT, + registry_name text not null + constraint registry_name_len_check + check (length(registry_name) <= 255), + registry_root_parent_id INTEGER not null, + registry_parent_id INTEGER not null, + registry_description text, + registry_type text not null, + registry_package_type text not null, + registry_upstream_proxies text, + registry_allowed_pattern text, + registry_blocked_pattern text, + registry_labels text, + registry_created_at INTEGER not null, + registry_updated_at INTEGER not null, + registry_created_by INTEGER not null, + registry_updated_by INTEGER not null, + constraint unique_registries + unique (registry_root_parent_id, registry_name) +); + +create table media_types +( + mt_id INTEGER PRIMARY KEY AUTOINCREMENT, + mt_media_type text not null + constraint unique_media_types_type + unique, + mt_created_at INTEGER NOT NULL DEFAULT (strftime('%s', 'now') * 1000) +); + +create table blobs +( + blob_id INTEGER PRIMARY KEY AUTOINCREMENT, + blob_root_parent_id INTEGER not null, + blob_digest bytea not null, + blob_media_type_id INTEGER not null + constraint fk_blobs_media_type_id_media_types + references media_types(mt_id), + blob_size INTEGER not null, + blob_created_at INTEGER not null, + blob_created_by INTEGER not null, + constraint unique_digest_root_parent_id unique (blob_digest, blob_root_parent_id) + ); + +create index index_blobs_on_media_type_id + on blobs (blob_media_type_id); + +create table registry_blobs +( + rblob_id INTEGER PRIMARY KEY AUTOINCREMENT, + rblob_registry_id INTEGER not null + constraint fk_registry_blobs_rpstry_id_registries + references registries(registry_id) + on delete cascade, + rblob_blob_id INTEGER not null + constraint fk_registry_blobs_blob_id_blobs + references blobs(blob_id) + on delete cascade, + rblob_image_name text + constraint registry_blobs_image_len_check + check (length(rblob_image_name) <= 255), + rblob_created_at INTEGER not null, + rblob_updated_at INTEGER not null, + rblob_created_by INTEGER not null, + rblob_updated_by INTEGER not null, + + constraint unique_registry_blobs_registry_id_blob_id_image + unique (rblob_registry_id, rblob_blob_id, rblob_image_name) + ); + +create index index_registry_blobs_on_reg_id + on registry_blobs (rblob_registry_id); + +create index index_registry_blobs_on_reg_blob_id + on registry_blobs (rblob_registry_id, rblob_blob_id); + + + +create table manifests +( + manifest_id INTEGER PRIMARY KEY AUTOINCREMENT, + manifest_registry_id INTEGER not null + constraint fk_manifests_registry_id_registries + references registries(registry_id) + on delete cascade, + manifest_schema_version smallint not null, + manifest_media_type_id INTEGER not null + constraint fk_manifests_media_type_id_media_types + references media_types(mt_id), + manifest_artifact_media_type text, + manifest_total_size INTEGER not null, + manifest_configuration_media_type text, + manifest_configuration_payload bytea, + manifest_configuration_blob_id INTEGER + constraint fk_manifests_configuration_blob_id_blobs + references blobs(blob_id), + manifest_configuration_digest bytea, + manifest_digest bytea not null, + manifest_payload bytea not null, + manifest_non_conformant boolean default false, + manifest_non_distributable_layers boolean default false, + 
manifest_subject_id INTEGER, + manifest_subject_digest bytea, + manifest_annotations bytea, + manifest_image_name text not null + constraint manifests_img_name_len_check + check (length(manifest_image_name) <= 255), + manifest_created_at INTEGER not null, + manifest_created_by INTEGER not null, + manifest_updated_at INTEGER not null, + manifest_updated_by INTEGER not null, + constraint unique_manifests_registry_id_image_name_and_digest + unique (manifest_registry_id, manifest_image_name, manifest_digest), + constraint unique_manifests_registry_id_id_cfg_blob_id + unique (manifest_registry_id, manifest_id, manifest_configuration_blob_id), + constraint fk_manifests_subject_id_manifests + foreign key (manifest_subject_id) references manifests(manifest_id) + on delete cascade + ); + +create index index_manifests_on_media_type_id + on manifests (manifest_media_type_id); + +create index index_manifests_on_configuration_blob_id + on manifests (manifest_configuration_blob_id); + + + +create table manifest_references +( + manifest_ref_id INTEGER PRIMARY KEY AUTOINCREMENT, + manifest_ref_registry_id INTEGER not null, + manifest_ref_parent_id INTEGER not null, + manifest_ref_child_id INTEGER not null, + manifest_ref_created_at INTEGER not null, + manifest_ref_updated_at INTEGER not null, + manifest_ref_created_by INTEGER not null, + manifest_ref_updated_by INTEGER not null, + constraint unique_manifest_references_prt_id_chd_id + unique (manifest_ref_registry_id, manifest_ref_parent_id, manifest_ref_child_id), + constraint fk_manifest_ref_parent_id_manifests_manifest_id + foreign key (manifest_ref_parent_id) references manifests(manifest_id) + on delete cascade, + constraint fk_manifest_ref_child_id_manifests_manifest_id + foreign key (manifest_ref_child_id) references manifests(manifest_id), + constraint check_manifest_references_parent_id_and_child_id_differ + check (manifest_ref_parent_id <> manifest_ref_child_id) + ); + +create index index_manifest_references_on_rpstry_id_child_id + on manifest_references (manifest_ref_registry_id, manifest_ref_child_id); + +create table layers +( + layer_id INTEGER PRIMARY KEY AUTOINCREMENT, + layer_registry_id INTEGER not null, + layer_manifest_id INTEGER not null, + layer_media_type_id INTEGER not null + constraint fk_layer_media_type_id_media_types + references media_types(mt_id), + layer_blob_id INTEGER not null + constraint fk_layer_blob_id_blobs + references blobs(blob_id), + layer_size INTEGER not null, + layer_created_at INTEGER not null, + layer_updated_at INTEGER not null, + layer_created_by INTEGER not null, + layer_updated_by INTEGER not null, + constraint unique_layer_rpstry_id_and_mnfst_id_and_blob_id + unique (layer_registry_id, layer_manifest_id, layer_blob_id), + constraint unique_layer_rpstry_id_and_id_and_blob_id + unique (layer_registry_id, layer_id, layer_blob_id), + constraint fk_layer_manifest_id_and_manifests_manifest_id + foreign key (layer_manifest_id) references manifests(manifest_id) + on delete cascade + ); + +create index index_layer_on_media_type_id + on layers (layer_media_type_id); + +create index index_layer_on_blob_id + on layers (layer_blob_id); + +create table artifacts +( + artifact_id INTEGER PRIMARY KEY AUTOINCREMENT, + artifact_name text not null, + artifact_registry_id INTEGER not null + constraint fk_registries_registry_id + references registries(registry_id) + on delete cascade, + artifact_labels text, + artifact_enabled boolean default false, + artifact_created_at INTEGER, + artifact_updated_at INTEGER, + 
artifact_created_by INTEGER, + artifact_updated_by INTEGER, + constraint unique_artifact_registry_id_and_name unique (artifact_registry_id, artifact_name), + constraint check_artifact_name_length check ((length(artifact_name) <= 255)) + ); + +create index index_artifact_on_registry_id ON artifacts (artifact_registry_id); + + +create table artifact_stats +( + artifact_stat_id INTEGER PRIMARY KEY AUTOINCREMENT, + artifact_stat_artifact_id INTEGER not null + constraint fk_artifacts_artifact_id + references artifacts(artifact_id) on delete cascade, + artifact_stat_date INTEGER, + artifact_stat_download_count INTEGER, + artifact_stat_upload_bytes INTEGER, + artifact_stat_download_bytes INTEGER, + artifact_stat_created_at INTEGER not null, + artifact_stat_updated_at INTEGER not null, + artifact_stat_created_by INTEGER not null, + artifact_stat_updated_by INTEGER not null, + constraint unique_artifact_stats_artifact_id_and_date unique (artifact_stat_artifact_id, artifact_stat_date) + ); + +create table tags +( + tag_id INTEGER PRIMARY KEY AUTOINCREMENT, + tag_name text not null + constraint tag_name_len_check + check (length(tag_name) <= 128), + tag_image_name text not null + constraint tag_img_name_len_check + check (length(tag_image_name) <= 255), + tag_registry_id INTEGER not null, + tag_manifest_id INTEGER not null, + tag_created_at INTEGER, + tag_updated_at INTEGER, + tag_created_by INTEGER, + tag_updated_by INTEGER, + constraint fk_tag_manifest_id_and_manifests_manifest_id FOREIGN KEY + (tag_manifest_id) REFERENCES manifests (manifest_id) ON DELETE CASCADE, + constraint unique_tag_registry_id_and_name_and_image_name + unique (tag_registry_id, tag_name, tag_image_name) + ); + +create index index_tag_on_rpository_id_and_manifest_id + on tags (tag_registry_id, tag_manifest_id); + +create table upstream_proxy_configs +( + upstream_proxy_config_id INTEGER PRIMARY KEY AUTOINCREMENT, + upstream_proxy_config_registry_id INTEGER not null + constraint fk_upstream_proxy_config_registry_id + references registries(registry_id) + on delete cascade, + upstream_proxy_config_source text, + upstream_proxy_config_url text, + upstream_proxy_config_auth_type text not null, + upstream_proxy_config_user_name text, + upstream_proxy_config_secret_identifier text, + upstream_proxy_config_secret_space_id int, + upstream_proxy_config_token text, + upstream_proxy_config_created_at INTEGER, + upstream_proxy_config_updated_at INTEGER, + upstream_proxy_config_created_by INTEGER, + upstream_proxy_config_updated_by INTEGER, + constraint fk_layers_secret_identifier_and_secret_space_id FOREIGN KEY + (upstream_proxy_config_secret_identifier, upstream_proxy_config_secret_space_id) REFERENCES secrets(secret_uid, secret_space_id) + ON DELETE CASCADE +); + +create index index_upstream_proxy_config_on_registry_id + on upstream_proxy_configs (upstream_proxy_config_registry_id); + +create table cleanup_policies +( + cp_id INTEGER PRIMARY KEY AUTOINCREMENT, + cp_registry_id INTEGER not null + constraint fk_cleanup_policies_registry_id + references registries(registry_id) ON DELETE CASCADE, + cp_name text, + cp_expiry_time_ms INTEGER, + cp_created_at INTEGER not null, + cp_updated_at INTEGER not null, + cp_created_by INTEGER not null, + cp_updated_by INTEGER not null +); + +create index index_cleanup_policies_on_registry_id + on cleanup_policies (cp_registry_id); + +create table cleanup_policy_prefix_mappings +( + cpp_id INTEGER PRIMARY KEY AUTOINCREMENT, + cpp_cleanup_policy_id INTEGER not null + constraint 
fk_cleanup_policy_prefix_registry_id + references cleanup_policies(cp_id) ON DELETE CASCADE, + cpp_prefix text not null, + cpp_prefix_type text not null +); + +create index index_cleanup_policy_map_on_policy_id + on cleanup_policy_prefix_mappings (cpp_cleanup_policy_id); + + + +insert into media_types (mt_media_type) +values ('application/vnd.docker.distribution.manifest.v1+json'), + ('application/vnd.docker.distribution.manifest.v1+prettyjws'), + ('application/vnd.docker.distribution.manifest.v2+json'), + ('application/vnd.docker.distribution.manifest.list.v2+json'), + ('application/vnd.docker.image.rootfs.diff.tar'), + ('application/vnd.docker.image.rootfs.diff.tar.gzip'), + ('application/vnd.docker.image.rootfs.foreign.diff.tar.gzip'), + ('application/vnd.docker.container.image.v1+json'), + ('application/vnd.docker.container.image.rootfs.diff+x-gtar'), + ('application/vnd.docker.plugin.v1+json'), + ('application/vnd.oci.image.layer.v1.tar'), + ('application/vnd.oci.image.layer.v1.tar+gzip'), + ('application/vnd.oci.image.layer.v1.tar+zstd'), + ('application/vnd.oci.image.layer.nondistributable.v1.tar'), + ('application/vnd.oci.image.layer.nondistributable.v1.tar+gzip'), + ('application/vnd.oci.image.config.v1+json'), + ('application/vnd.oci.image.manifest.v1+json'), + ('application/vnd.oci.image.index.v1+json'), + ('application/vnd.cncf.helm.config.v1+json'), + ('application/tar+gzip'), + ('application/octet-stream'), + ('application/vnd.buildkit.cacheconfig.v0'), + ('application/vnd.cncf.helm.chart.content.v1.tar+gzip'), + ('application/vnd.cncf.helm.chart.provenance.v1.prov'); diff --git a/registry/app/store/migrations/migrator.go b/registry/app/store/migrations/migrator.go new file mode 100644 index 000000000..136bdda86 --- /dev/null +++ b/registry/app/store/migrations/migrator.go @@ -0,0 +1,62 @@ +// Copyright 2023 Harness, Inc. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package migrations + +import ( + "errors" + "fmt" + "strings" + + "github.com/golang-migrate/migrate/v4" + "github.com/rs/zerolog/log" +) + +// Migrate orchestrates database migrations using golang-migrate. +func Migrate(m *migrate.Migrate) error { + version, dirty, err := m.Version() + if err != nil && !errors.Is(err, migrate.ErrNilVersion) { + log.Info().Msg("failed to fetch schema version from db.") + return err + } + log.Info().Msgf("current version %d", version) + if dirty { + prev := int(version) - 1 + log.Info().Msg(fmt.Sprintf("schema is dirty at version = %d. Forcing version to %d", int(version), prev)) + err = m.Force(prev) + if err != nil { + log.Error().Stack().Err(err).Msg(fmt.Sprintf("failed to force schema version to %d %s", prev, err)) + return err + } + } + err = m.Up() + if errors.Is(err, migrate.ErrNoChange) { + log.Info().Msg("No change to schema. No migrations were run") + return nil + } + + if err != nil && strings.Contains(err.Error(), "no migration found") { + // The library throws this error when a given migration file does not exist.
Unfortunately, we do not have + // an error constant to compare with + log.Error().Stack().Err(err).Msg("skipping migration because migration file was not found") + return nil + } + + if err != nil { + log.Error().Stack().Err(err).Msg("failed to run db migrations") + return fmt.Errorf("error when migrating up: %w", err) + } + log.Info().Msg("Migrations successfully completed") + return nil +} diff --git a/registry/app/store/model/database.go b/registry/app/store/model/database.go new file mode 100644 index 000000000..0524a0686 --- /dev/null +++ b/registry/app/store/model/database.go @@ -0,0 +1,32 @@ +// Copyright 2023 Harness, Inc. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package model + +// Database ... +type Database struct { + Type string `json:"type"` + PostGreSQL *PostGreSQL `json:"postgresql,omitempty"` + Sqlite *Sqlite +} + +// PostGreSQL ... +type PostGreSQL struct { + Datasource string +} + +// Sqlite ... +type Sqlite struct { + Datasource string +} diff --git a/registry/config/const.go b/registry/config/const.go new file mode 100644 index 000000000..50ceaee98 --- /dev/null +++ b/registry/config/const.go @@ -0,0 +1,21 @@ +// Copyright 2023 Harness, Inc. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package config + +// Database driver name constants. +const ( + PostgresqlDatabase = "postgres" + Sqlite = "sqlite3" +) diff --git a/registry/config/helper.go b/registry/config/helper.go new file mode 100644 index 000000000..f21430343 --- /dev/null +++ b/registry/config/helper.go @@ -0,0 +1,47 @@ +// Copyright 2023 Harness, Inc. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License.
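For context on how the Migrate helper above is typically driven: golang-migrate builds a *migrate.Migrate from a source URL and a database URL, and Migrate then logs the version, forces a dirty schema back one version, and applies Up(). A hedged sketch of constructing and running one (the migrations path and DSN are placeholders; the real instance is wired up elsewhere in the service):

    package main

    import (
        "log"

        "github.com/golang-migrate/migrate/v4"
        _ "github.com/golang-migrate/migrate/v4/database/postgres"
        _ "github.com/golang-migrate/migrate/v4/source/file"
    )

    func main() {
        // Placeholder source path and DSN; adjust to the deployment.
        m, err := migrate.New(
            "file://registry/app/store/migrations/files/postgres",
            "postgres://gitness:secret@localhost:5432/gitness?sslmode=disable",
        )
        if err != nil {
            log.Fatalln(err)
        }
        // migrations.Migrate(m) wraps this Up() call with the
        // dirty-version handling shown in the hunk above.
        if err := m.Up(); err != nil && err != migrate.ErrNoChange {
            log.Fatalln(err)
        }
    }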
+ +package config + +import "github.com/harness/gitness/types" + +func GetS3StorageParameters(c *types.Config) map[string]interface{} { + s3Properties := make(map[string]interface{}) + s3Properties["accesskey"] = c.Registry.Storage.S3Storage.AccessKey + s3Properties["secretkey"] = c.Registry.Storage.S3Storage.SecretKey + s3Properties["region"] = c.Registry.Storage.S3Storage.Region + s3Properties["regionendpoint"] = c.Registry.Storage.S3Storage.RegionEndpoint + s3Properties["forcepathstyle"] = c.Registry.Storage.S3Storage.ForcePathStyle + s3Properties["accelerate"] = c.Registry.Storage.S3Storage.Accelerate + s3Properties["bucket"] = c.Registry.Storage.S3Storage.Bucket + s3Properties["encrypt"] = c.Registry.Storage.S3Storage.Encrypt + s3Properties["keyid"] = c.Registry.Storage.S3Storage.KeyID + s3Properties["secure"] = c.Registry.Storage.S3Storage.Secure + s3Properties["v4auth"] = c.Registry.Storage.S3Storage.V4Auth + s3Properties["chunksize"] = c.Registry.Storage.S3Storage.ChunkSize + s3Properties["multipartcopychunksize"] = c.Registry.Storage.S3Storage.MultipartCopyChunkSize + s3Properties["multipartcopymaxconcurrency"] = c.Registry.Storage.S3Storage.MultipartCopyMaxConcurrency + s3Properties["multipartcopythresholdsize"] = c.Registry.Storage.S3Storage.MultipartCopyThresholdSize + s3Properties["rootdirectory"] = c.Registry.Storage.S3Storage.RootDirectory + s3Properties["usedualstack"] = c.Registry.Storage.S3Storage.UseDualStack + s3Properties["loglevel"] = c.Registry.Storage.S3Storage.LogLevel + return s3Properties +} + +func GetFilesystemParams(c *types.Config) map[string]interface{} { + props := make(map[string]interface{}) + props["maxthreads"] = c.Registry.Storage.FileSystemStorage.MaxThreads + props["rootdirectory"] = c.Registry.Storage.FileSystemStorage.RootDirectory + return props +} diff --git a/registry/config/openapi/artifact-services.yaml b/registry/config/openapi/artifact-services.yaml new file mode 100644 index 000000000..57078df0c --- /dev/null +++ b/registry/config/openapi/artifact-services.yaml @@ -0,0 +1,10 @@ +package: artifact +generate: + strict-server: true + chi-server: true + embedded-spec: true +output: ./registry/app/api/openapi/contracts/artifact/services.gen.go +compatibility: + old-aliasing: true + old-enum-conflicts: true + old-merge-schemas: true diff --git a/registry/config/openapi/artifact-types.yaml b/registry/config/openapi/artifact-types.yaml new file mode 100644 index 000000000..ef4044252 --- /dev/null +++ b/registry/config/openapi/artifact-types.yaml @@ -0,0 +1,8 @@ +package: artifact +generate: + models: true +output: ./registry/app/api/openapi/contracts/artifact/types.gen.go +compatibility: + old-aliasing: true + old-enum-conflicts: true + old-merge-schemas: true diff --git a/registry/docs/docs.go b/registry/docs/docs.go new file mode 100644 index 000000000..47f790030 --- /dev/null +++ b/registry/docs/docs.go @@ -0,0 +1,80 @@ +// Copyright 2023 Harness, Inc. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
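The helper functions above flatten the typed registry storage config into the loosely typed map[string]interface{} that the storage drivers parse; the lowercase key names ("rootdirectory", "maxthreads", and so on) must match what the driver expects. A hedged, self-contained sketch of the pattern (struct fields abbreviated, names illustrative rather than the real types.Config):

    package main

    import "fmt"

    // S3Storage mirrors a slice of the typed config that
    // GetS3StorageParameters flattens (field set abbreviated).
    type S3Storage struct {
        Bucket string
        Region string
        Secure bool
    }

    // params builds the driver-facing map; keys are the driver's
    // lowercase parameter names, not the Go field names.
    func params(c S3Storage) map[string]interface{} {
        return map[string]interface{}{
            "bucket": c.Bucket,
            "region": c.Region,
            "secure": c.Secure,
        }
    }

    func main() {
        fmt.Println(params(S3Storage{Bucket: "artifacts", Region: "us-east-1", Secure: true}))
    }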
+ +// Package docs Code generated by swaggo/swag. DO NOT EDIT +package docs + +import "github.com/swaggo/swag" + +const docTemplate = `{ + "schemes": {{ marshal .Schemes }}, + "swagger": "2.0", + "info": { + "description": "{{escape .Description}}", + "title": "{{.Title}}", + "termsOfService": "http://swagger.io/terms/", + "contact": { + "name": "API Support", + "url": "http://www.swagger.io/support", + "email": "support@swagger.io" + }, + "license": { + "name": "Apache 2.0", + "url": "http://www.apache.org/licenses/LICENSE-2.0.html" + }, + "version": "{{.Version}}" + }, + "host": "{{.Host}}", + "basePath": "{{.BasePath}}", + "paths": { + "/v1/healthz": { + "get": { + "description": "Health API for Artifact-Registry Service", + "consumes": [ + "application/json" + ], + "produces": [ + "application/json" + ], + "summary": "Health API for Artifact-Registry Service", + "responses": { + "200": { + "description": "OK", + "schema": { + "type": "string" + } + } + } + } + } + } +}` + +// SwaggerInfo holds exported Swagger Info so clients can modify it +var SwaggerInfo = &swag.Spec{ + Version: "1.0", + Host: "localhost:9091", + BasePath: "/", + Schemes: []string{"http"}, + Title: "Swagger Doc- Artifact-Registry", + Description: "Client to connect to artifact-registry APIs.", + InfoInstanceName: "swagger", + SwaggerTemplate: docTemplate, + LeftDelim: "{{", + RightDelim: "}}", +} + +func init() { + swag.Register(SwaggerInfo.InstanceName(), SwaggerInfo) +} diff --git a/registry/docs/swagger.json b/registry/docs/swagger.json new file mode 100644 index 000000000..8baf20930 --- /dev/null +++ b/registry/docs/swagger.json @@ -0,0 +1,45 @@ +{ + "schemes": [ + "http" + ], + "swagger": "2.0", + "info": { + "description": "Client to connect to artifact-registry APIs.", + "title": "Swagger Doc- Artifact-Registry", + "termsOfService": "http://swagger.io/terms/", + "contact": { + "name": "API Support", + "url": "http://www.swagger.io/support", + "email": "support@swagger.io" + }, + "license": { + "name": "Apache 2.0", + "url": "http://www.apache.org/licenses/LICENSE-2.0.html" + }, + "version": "1.0" + }, + "host": "localhost:9091", + "basePath": "/", + "paths": { + "/v1/healthz": { + "get": { + "description": "Health API for Artifact-Registry Service", + "consumes": [ + "application/json" + ], + "produces": [ + "application/json" + ], + "summary": "Health API for Artifact-Registry Service", + "responses": { + "200": { + "description": "OK", + "schema": { + "type": "string" + } + } + } + } + } + } +} \ No newline at end of file diff --git a/registry/docs/swagger.yaml b/registry/docs/swagger.yaml new file mode 100644 index 000000000..c82f4d9c6 --- /dev/null +++ b/registry/docs/swagger.yaml @@ -0,0 +1,31 @@ +basePath: / +host: localhost:9091 +info: + contact: + email: support@swagger.io + name: API Support + url: http://www.swagger.io/support + description: Client to connect to artifact-registry APIs. 
+ license: + name: Apache 2.0 + url: http://www.apache.org/licenses/LICENSE-2.0.html + termsOfService: http://swagger.io/terms/ + title: Swagger Doc- Artifact-Registry + version: "1.0" +paths: + /v1/healthz: + get: + consumes: + - application/json + description: Health API for Artifact-Registry Service + produces: + - application/json + responses: + "200": + description: OK + schema: + type: string + summary: Health API for Artifact-Registry Service +schemes: +- http +swagger: "2.0" diff --git a/registry/gc/garbagecollector.go b/registry/gc/garbagecollector.go new file mode 100644 index 000000000..edd30c45f --- /dev/null +++ b/registry/gc/garbagecollector.go @@ -0,0 +1,68 @@ +// Copyright 2023 Harness, Inc. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package gc + +import ( + "context" + "time" + + corestore "github.com/harness/gitness/app/store" + storagedriver "github.com/harness/gitness/registry/app/driver" + "github.com/harness/gitness/registry/app/store" + registrytypes "github.com/harness/gitness/registry/types" + "github.com/harness/gitness/types" + + "github.com/jmoiron/sqlx" +) + +type Noop struct{} + +func New() Service { + return &Noop{} +} + +func (s *Noop) Start( + _ context.Context, _ *sqlx.DB, _ corestore.SpaceStore, + _ store.BlobRepository, _ storagedriver.StorageDeleter, + _ *types.Config, _ store.MediaTypesRepository, _ store.ManifestRepository, +) { + // NOOP +} + +func (s *Noop) BlobFindAndLockBefore(_ context.Context, _ int64, _ time.Time) (*registrytypes.GCBlobTask, error) { + // NOOP + //nolint:nilnil + return nil, nil +} + +func (s *Noop) BlobReschedule(_ context.Context, _ *registrytypes.GCBlobTask, _ time.Duration) error { + // NOOP + return nil +} + +func (s *Noop) ManifestFindAndLockBefore(_ context.Context, _, _ int64, _ time.Time) ( + *registrytypes.GCManifestTask, error, +) { + // NOOP + //nolint:nilnil + return nil, nil +} + +func (s *Noop) ManifestFindAndLockNBefore(_ context.Context, _ int64, _ []int64, _ time.Time) ( + []*registrytypes.GCManifestTask, error, +) { + // NOOP + return nil, nil +} diff --git a/registry/gc/interface.go b/registry/gc/interface.go new file mode 100644 index 000000000..9c0091139 --- /dev/null +++ b/registry/gc/interface.go @@ -0,0 +1,46 @@ +// Copyright 2023 Harness, Inc. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
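The Noop implementation above returns a nil task with a nil error from the find-and-lock calls (hence the //nolint:nilnil annotations), so callers must treat a nil task as "nothing to do" rather than as a failure. A hedged sketch of a worker step honoring that contract, written against the gc.Service interface defined in the hunk that follows (the function itself is hypothetical):

    package gcexample

    import (
        "context"
        "time"

        "github.com/harness/gitness/registry/gc"
    )

    // reviewBlob is a hypothetical worker step honoring the nil-task contract.
    func reviewBlob(ctx context.Context, svc gc.Service, blobID int64) error {
        task, err := svc.BlobFindAndLockBefore(ctx, blobID, time.Now())
        if err != nil {
            return err
        }
        if task == nil {
            // Noop service, or nothing due for review yet: not an error.
            return nil
        }
        // Process the task; on transient failure, push it back for later.
        return svc.BlobReschedule(ctx, task, 10*time.Minute)
    }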
+ +package gc + +import ( + "context" + "time" + + corestore "github.com/harness/gitness/app/store" + storagedriver "github.com/harness/gitness/registry/app/driver" + "github.com/harness/gitness/registry/app/store" + registrytypes "github.com/harness/gitness/registry/types" + "github.com/harness/gitness/types" + + "github.com/jmoiron/sqlx" +) + +type Service interface { + Start( + ctx context.Context, sqlDB *sqlx.DB, spaceStore corestore.SpaceStore, + blobRepo store.BlobRepository, storageDeleter storagedriver.StorageDeleter, + config *types.Config, mtRepository store.MediaTypesRepository, manifestRepository store.ManifestRepository, + ) + BlobFindAndLockBefore(ctx context.Context, blobID int64, date time.Time) (*registrytypes.GCBlobTask, error) + BlobReschedule(ctx context.Context, b *registrytypes.GCBlobTask, d time.Duration) error + ManifestFindAndLockBefore( + ctx context.Context, registryID, manifestID int64, + date time.Time, + ) (*registrytypes.GCManifestTask, error) + ManifestFindAndLockNBefore( + ctx context.Context, registryID int64, manifestIDs []int64, + date time.Time, + ) ([]*registrytypes.GCManifestTask, error) +} diff --git a/registry/gc/wire.go b/registry/gc/wire.go new file mode 100644 index 000000000..f94278a46 --- /dev/null +++ b/registry/gc/wire.go @@ -0,0 +1,31 @@ +// Copyright 2023 Harness, Inc. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package gc + +import ( + storagedriver "github.com/harness/gitness/registry/app/driver" + + "github.com/google/wire" +) + +func StorageDeleterProvider(driver storagedriver.StorageDriver) storagedriver.StorageDeleter { + return driver +} + +func ServiceProvider() Service { + return New() +} + +var WireSet = wire.NewSet(StorageDeleterProvider, ServiceProvider) diff --git a/registry/tests/conformance_test.sh b/registry/tests/conformance_test.sh new file mode 100755 index 000000000..341ab3627 --- /dev/null +++ b/registry/tests/conformance_test.sh @@ -0,0 +1,102 @@ +#!/bin/bash +set -e + +echo "get the conformance testing code..." +git clone https://github.com/opencontainers/distribution-spec.git + +function createSpace { + echo "Creating space... 
$2" + curl --location --request POST "http://$1/api/v1/spaces" \ + --header 'Content-Type: application/json' \ + --header 'Authorization: Bearer '"$3" \ + --header 'Accept: application/json' \ + --data "{\"description\": \"corformance test\", \"identifier\": \"$2\",\"is_public\": true, \"parent_ref\": \"\"}" +} + + +function createRegistry { + echo "Creating registry: $2" + curl --location "http://$1/api/v1/registry" \ + --header 'Content-Type: application/json' \ + --header 'Authorization: Bearer '"$4" \ + --header 'Accept: application/json' \ + --data "{\"config\":{\"type\": \"VIRTUAL\"}, \"description\": \"mydesc\", \"identifier\": \"$2\", \"packageType\": \"DOCKER\",\"parentRef\": \"$3\"}" +} + +function login { + # Define the URL and request payload + url="http://$1/api/v1/login?include_cookie=false" + payload='{ + "login_identifier": "admin", + "password": "changeit" + }' + + # Make the curl call and capture the response + response=$(curl -s -X 'POST' "$url" -H 'accept: application/json' -H 'Content-Type: application/json' -d "$payload") + + # Extract the access_token using jq + access_token=$(echo "$response" | jq -r '.access_token') + + # Check if jq command succeeded + if [ $? -ne 0 ]; then + echo "Failed to parse access_token" + exit 1 + fi + + # Print the access_token +# echo "Access Token: $access_token" + echo "$access_token" +} + +function getPat { + # Define the URL and request payload + url="http://$1/api/v1/user/tokens" + payload="{\"uid\":\"code_token_$2\"}" + + # Make the curl call and capture the response + response=$(curl -s -X 'POST' "$url" -H 'accept: application/json' -H 'Content-Type: application/json' -H 'Cookie: token='"$3" -d "$payload") + + # Extract the access_token using jq + access_token=$(echo "$response" | jq -r '.access_token') + + # Check if jq command succeeded + if [ $? -ne 0 ]; then + echo "Failed to parse access_token" + exit 1 + fi + + # Print the access_token +# echo "Access Token: $access_token" + echo "$access_token" +} + + +epoch=$(date +%s) + +space="Space_$epoch" +space_lower=$(echo $space | tr '[:upper:]' '[:lower:]') +conformance="conformance_$epoch" +crossmount="crossmount_$epoch" + +token=$(login $1) +pat=$(getPat $1 $epoch $token) +createSpace $1 $space $token +createRegistry $1 $conformance $space $token +createRegistry $1 $crossmount $space $token + +echo "run conformance test..." +export OCI_ROOT_URL="http://$1" +export OCI_NAMESPACE="$space_lower/$conformance/testrepo" +export OCI_DEBUG="true" + +export OCI_TEST_PUSH=1 +export OCI_TEST_PULL=1 +export OCI_TEST_CONTENT_DISCOVERY=1 +export OCI_TEST_CONTENT_MANAGEMENT=1 +export OCI_CROSSMOUNT_NAMESPACE="$space_lower/$crossmount/testrepo" +export OCI_AUTOMATIC_CROSSMOUNT="false" + +export OCI_USERNAME="admin" +export OCI_PASSWORD="$pat" +cd ./distribution-spec/conformance +go test . \ No newline at end of file diff --git a/registry/types/artifact.go b/registry/types/artifact.go new file mode 100644 index 000000000..60c74a10b --- /dev/null +++ b/registry/types/artifact.go @@ -0,0 +1,32 @@ +// Copyright 2023 Harness, Inc. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+// See the License for the specific language governing permissions and +// limitations under the License. + +package types + +import ( + "time" +) + +// Artifact DTO object. +type Artifact struct { + ID int64 + Name string + RegistryID int64 + Labels []string + Enabled bool + CreatedAt time.Time + UpdatedAt time.Time + CreatedBy int64 + UpdatedBy int64 +} diff --git a/registry/types/artifact_stat.go b/registry/types/artifact_stat.go new file mode 100644 index 000000000..489b0fa08 --- /dev/null +++ b/registry/types/artifact_stat.go @@ -0,0 +1,33 @@ +// Copyright 2023 Harness, Inc. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package types + +import ( + "time" +) + +// ArtifactStat DTO object. +type ArtifactStat struct { + ID int64 + ArtifactID int64 + Date int64 + DownloadCount int64 + UploadBytes int64 + DownloadBytes int64 + CreatedAt time.Time + UpdatedAt time.Time + CreatedBy int64 + UpdatedBy int64 +} diff --git a/registry/types/blob.go b/registry/types/blob.go new file mode 100644 index 000000000..8963e3d47 --- /dev/null +++ b/registry/types/blob.go @@ -0,0 +1,38 @@ +// Copyright 2023 Harness, Inc. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package types + +import ( + "time" + + "github.com/opencontainers/go-digest" +) + +// Blob DTO object. +type Blob struct { + ID int64 + RootParentID int64 + // This media type is for S3. The caller should look this up + // and override the value for the specific repository. + MediaType string + MediaTypeID int64 + Digest digest.Digest + Size int64 + CreatedAt time.Time + CreatedBy int64 +} + +// Blobs is a slice of Blob pointers. +type Blobs []*Blob diff --git a/registry/types/cleanuppolicy.go b/registry/types/cleanuppolicy.go new file mode 100644 index 000000000..413140e26 --- /dev/null +++ b/registry/types/cleanuppolicy.go @@ -0,0 +1,43 @@ +// Copyright 2023 Harness, Inc. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
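The Blob DTO above carries the digest as an opencontainers/go-digest value rather than a raw string. A small, runnable illustration of what that type provides (digest.FromBytes is the library's canonical constructor):

    package main

    import (
        "fmt"

        "github.com/opencontainers/go-digest"
    )

    func main() {
        d := digest.FromBytes([]byte("layer content"))
        fmt.Println(d)             // sha256:<64 hex chars>
        fmt.Println(d.Algorithm()) // sha256
        fmt.Println(d.Encoded())   // just the hex portion
    }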
+ +package types + +import ( + "time" + + "github.com/harness/gitness/registry/types/enum" +) + +// CleanupPolicy DTO object. +type CleanupPolicy struct { + ID int64 + RegistryID int64 + Name string + VersionPrefix []string + PackagePrefix []string + ExpiryTime int64 + CreatedAt time.Time + UpdatedAt time.Time + CreatedBy int64 + UpdatedBy int64 +} + +// CleanupPolicyPrefix DTO object. +type CleanupPolicyPrefix struct { + ID int64 + CleanupPolicyID int64 + Prefix string + PrefixType enum.PrefixType +} diff --git a/registry/types/configuration.go b/registry/types/configuration.go new file mode 100644 index 000000000..06355386c --- /dev/null +++ b/registry/types/configuration.go @@ -0,0 +1,29 @@ +// Copyright 2023 Harness, Inc. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package types + +import "github.com/opencontainers/go-digest" + +type Configuration struct { + MediaType string + BlobID int64 + Digest digest.Digest + // Payload is the JSON payload of a manifest configuration. + // For operational safety reasons, + // a payload is only saved in this attribute if its size + // does not exceed a predefined + // limit (see handlers.dbConfigSizeLimit). + Payload Payload +} diff --git a/registry/types/digest.go b/registry/types/digest.go new file mode 100644 index 000000000..f5a7d4083 --- /dev/null +++ b/registry/types/digest.go @@ -0,0 +1,127 @@ +// Copyright 2023 Harness, Inc. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package types + +import ( + "errors" + "fmt" + + "github.com/harness/gitness/registry/app/store/database/util" + + "github.com/opencontainers/go-digest" +) + +// Digest is the database representation of a digest, stored in the format `<algorithm prefix><hex>`. +type Digest string + +const ( + // Algorithm prefixes are sequences of two digits. These should never change, only additions are allowed. + sha256DigestAlgorithmPrefix = "01" + sha512DigestAlgorithmPrefix = "02" +) + +func GetDigestBytes(dgst digest.Digest) ([]byte, error) { + if len(dgst.String()) == 0 { + return nil, nil + } + + newDigest, err := NewDigest(dgst) + if err != nil { + return nil, err + } + + digestBytes, err := util.GetHexDecodedBytes(string(newDigest)) + if err != nil { + return nil, err + } + return digestBytes, nil +} + +// String implements the Stringer interface. +func (d Digest) String() string { + return string(d) +} + +// NewDigest builds a Digest based on a digest.Digest.
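+// For illustration (hypothetical checksum): a digest.Digest of "sha256:4355a4..." +// maps to the Digest "014355a4...", i.e. the two-digit algorithm prefix +// followed by the hex checksum.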
+func NewDigest(d digest.Digest) (Digest, error) { + if err := d.Validate(); err != nil { + return "", err + } + + var algPrefix string + switch d.Algorithm() { + case digest.SHA256: + algPrefix = sha256DigestAlgorithmPrefix + case digest.SHA512: + algPrefix = sha512DigestAlgorithmPrefix + case digest.SHA384: + return "", fmt.Errorf("unimplemented algorithm %q", digest.SHA384) + default: + return "", fmt.Errorf("unknown algorithm %q", d.Algorithm()) + } + + return Digest(fmt.Sprintf("%s%s", algPrefix, d.Hex())), nil +} + +// Parse maps a Digest to a digest.Digest. +func (d Digest) Parse() (digest.Digest, error) { + str := d.String() + valid, err := d.validate(str) + if !valid { + return "", err + } + algPrefix := str[:2] + if len(str) == 2 { + return "", errors.New("no checksum") + } + + var alg digest.Algorithm + switch algPrefix { + case sha256DigestAlgorithmPrefix: + alg = digest.SHA256 + case sha512DigestAlgorithmPrefix: + alg = digest.SHA512 + default: + return "", fmt.Errorf("unknown algorithm prefix %q", algPrefix) + } + + dgst := digest.NewDigestFromHex(alg.String(), str[2:]) + if err := dgst.Validate(); err != nil { + return "", err + } + + return dgst, nil +} + +func (d Digest) validate(str string) (bool, error) { + if len(str) == 0 { + return false, nil + } + if len(str) < 2 { + return false, errors.New("invalid digest length") + } + return true, nil +} + +// HexDecode decodes binary data from a textual representation. +// The output is equivalent to the PostgreSQL binary `decode` function with the hex textual format. See +// https://www.postgresql.org/docs/14/functions-binarystring.html. +func (d Digest) HexDecode() string { + return fmt.Sprintf("\\x%s", d.String()) +} + +func (d Digest) Validate() string { + return fmt.Sprintf("\\x%s", d.String()) +} diff --git a/registry/types/enum/prefix_type.go b/registry/types/enum/prefix_type.go new file mode 100644 index 000000000..5d04c02d4 --- /dev/null +++ b/registry/types/enum/prefix_type.go @@ -0,0 +1,22 @@ +// Copyright 2023 Harness, Inc. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package enum + +type PrefixType string + +const ( + PrefixTypeVersion PrefixType = "version" + PrefixTypePackage PrefixType = "package" +) diff --git a/registry/types/filter_params.go b/registry/types/filter_params.go new file mode 100644 index 000000000..8aff355d2 --- /dev/null +++ b/registry/types/filter_params.go @@ -0,0 +1,29 @@ +// Copyright 2023 Harness, Inc. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+ +package types + +type SortOrder string + +type FilterParams struct { + SortOrder SortOrder + OrderBy string + Name string + BeforeEntry string + LastEntry string + PublishedAt string + MaxEntries int + IncludeReferrers bool + ReferrerTypes []string +} diff --git a/registry/types/gc.go b/registry/types/gc.go new file mode 100644 index 000000000..40dab1b9c --- /dev/null +++ b/registry/types/gc.go @@ -0,0 +1,43 @@ +// Copyright 2023 Harness, Inc. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package types + +import ( + "time" +) + +type GCBlobTask struct { + BlobID int64 + ReviewAfter time.Time + ReviewCount int + CreatedAt time.Time + Event string +} + +// GCManifestTask represents a row in the gc_manifest_review_queue table. +type GCManifestTask struct { + RegistryID int64 + ManifestID int64 + ReviewAfter time.Time + ReviewCount int + CreatedAt time.Time + Event string +} + +// GCReviewAfterDefault represents a row in the gc_review_after_defaults table. +type GCReviewAfterDefault struct { + Event string + Value time.Duration +} diff --git a/registry/types/jsonb.go b/registry/types/jsonb.go new file mode 100644 index 000000000..bff3cced5 --- /dev/null +++ b/registry/types/jsonb.go @@ -0,0 +1,42 @@ +// Copyright 2023 Harness, Inc. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package types + +import ( + "encoding/json" + "errors" + "fmt" +) + +type JSONB map[string]string + +// Scan implements the sql.Scanner interface for JSONB. +func (j *JSONB) Scan(value interface{}) error { + if value == nil { + *j = nil + return nil + } + bytes, ok := value.([]byte) + if !ok { + return errors.New(fmt.Sprint( + "Failed to unmarshal JSONB value:", value)) + } + var m map[string]string + if err := json.Unmarshal(bytes, &m); err != nil { + return err + } + *j = m + return nil +} diff --git a/registry/types/layer.go b/registry/types/layer.go new file mode 100644 index 000000000..a17ee99d2 --- /dev/null +++ b/registry/types/layer.go @@ -0,0 +1,31 @@ +// Copyright 2023 Harness, Inc. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+// See the License for the specific language governing permissions and +// limitations under the License. + +package types + +import "time" + +// Layer DTO object. +type Layer struct { + ID int64 + RegistryID int64 + ManifestID int64 + MediaTypeID int64 + BlobID int64 + Size int64 + CreatedAt time.Time + UpdatedAt time.Time + CreatedBy int64 + UpdatedBy int64 +} diff --git a/registry/types/manifest.go b/registry/types/manifest.go new file mode 100644 index 000000000..bd0aca7eb --- /dev/null +++ b/registry/types/manifest.go @@ -0,0 +1,54 @@ +// Copyright 2023 Harness, Inc. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package types + +import ( + "database/sql" + "time" + + "github.com/opencontainers/go-digest" +) + +// Manifest DTO object. +type Manifest struct { + ID int64 + RegistryID int64 + TotalSize int64 + SchemaVersion int + MediaType string + MediaTypeID int64 + ImageName string + ArtifactType sql.NullString + Digest digest.Digest + Payload Payload + Configuration *Configuration + SubjectID sql.NullInt64 + SubjectDigest digest.Digest + NonConformant bool + // NonDistributableLayers identifies whether a manifest + // references foreign/non-distributable layers. For now, we are + // not registering metadata about these layers, + // but we may wish to backfill that metadata in the future by parsing + // the manifest payload. + NonDistributableLayers bool + Annotations JSONB + CreatedAt time.Time + CreatedBy int64 + UpdatedAt time.Time + UpdatedBy int64 +} + +// Manifests is a slice of Manifest pointers. +type Manifests []*Manifest diff --git a/registry/types/manifest_reference.go b/registry/types/manifest_reference.go new file mode 100644 index 000000000..ba26e46fd --- /dev/null +++ b/registry/types/manifest_reference.go @@ -0,0 +1,29 @@ +// Copyright 2023 Harness, Inc. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package types + +import "time" + +// ManifestReference DTO object. +type ManifestReference struct { + ID int64 + RegistryID int64 + ParentID int64 + ChildID int64 + CreatedAt time.Time + UpdatedAt time.Time + CreatedBy int64 + UpdatedBy int64 +} diff --git a/registry/types/payload.go b/registry/types/payload.go new file mode 100644 index 000000000..2ebdef32c --- /dev/null +++ b/registry/types/payload.go @@ -0,0 +1,21 @@ +// Copyright 2023 Harness, Inc. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. 
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package types + +import ( + "encoding/json" +) + +type Payload json.RawMessage diff --git a/registry/types/registry.go b/registry/types/registry.go new file mode 100644 index 000000000..f5ae4dab4 --- /dev/null +++ b/registry/types/registry.go @@ -0,0 +1,40 @@ +// Copyright 2023 Harness, Inc. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package types + +import ( + "time" + + "github.com/harness/gitness/registry/app/api/openapi/contracts/artifact" +) + +// Registry DTO object. +type Registry struct { + ID int64 + Name string + ParentID int64 + RootParentID int64 + Description string + Type artifact.RegistryType + PackageType artifact.PackageType + UpstreamProxies []int64 + AllowedPattern []string + BlockedPattern []string + Labels []string + CreatedAt time.Time + UpdatedAt time.Time + CreatedBy int64 + UpdatedBy int64 +} diff --git a/registry/types/tag.go b/registry/types/tag.go new file mode 100644 index 000000000..35f08d14f --- /dev/null +++ b/registry/types/tag.go @@ -0,0 +1,63 @@ +// Copyright 2023 Harness, Inc. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package types + +import ( + "time" + + "github.com/harness/gitness/registry/app/api/openapi/contracts/artifact" +) + +// Tag DTO object. 
+type Tag struct { + ID int64 + Name string + ImageName string + RegistryID int64 + ManifestID int64 + CreatedAt time.Time + UpdatedAt time.Time + CreatedBy int64 + UpdatedBy int64 +} + +type ArtifactMetadata struct { + Name string + RepoName string + DownloadCount int64 + PackageType artifact.PackageType + Labels []string + LatestVersion string + CreatedAt time.Time + ModifiedAt time.Time +} + +type TagMetadata struct { + Name string + Size string + PackageType artifact.PackageType + DigestCount int + IsLatestVersion bool + ModifiedAt time.Time +} + +type TagDetail struct { + ID int64 + Name string + ImageName string + CreatedAt time.Time + UpdatedAt time.Time + Size string +} diff --git a/registry/types/upstream_proxy_config.go b/registry/types/upstream_proxy_config.go new file mode 100644 index 000000000..613cbaaea --- /dev/null +++ b/registry/types/upstream_proxy_config.go @@ -0,0 +1,60 @@ +// Copyright 2023 Harness, Inc. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package types + +import ( + "time" + + "github.com/harness/gitness/registry/app/api/openapi/contracts/artifact" +) + +// UpstreamProxyConfig DTO object. +type UpstreamProxyConfig struct { + ID int64 + RegistryID int64 + Source string + URL string + AuthType string + UserName string + Password string + SecretIdentifier string + SecretSpaceID int + Token string + CreatedAt time.Time + UpdatedAt time.Time + CreatedBy int64 + UpdatedBy int64 +} + +type UpstreamProxy struct { + ID int64 + RegistryID int64 + RepoKey string + ParentID string + PackageType artifact.PackageType + AllowedPattern []string + BlockedPattern []string + Source string + RepoURL string + RepoAuthType string + UserName string + SecretIdentifier string + SecretSpaceID int + Token string + CreatedAt time.Time + UpdatedAt time.Time + CreatedBy int64 + UpdatedBy int64 +} diff --git a/store/database/store.go b/store/database/store.go index 8a6861b4d..e75ca64b7 100644 --- a/store/database/store.go +++ b/store/database/store.go @@ -61,14 +61,21 @@ func Connect(ctx context.Context, driver string, datasource string) (*sqlx.DB, e } // ConnectAndMigrate creates the database handle and migrates the database. 
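+// With the variadic signature below, callers can pass several migrators that run in order; a hypothetical caller (migrator names assumed, not part of this change) might read: +// +// dbx, err := ConnectAndMigrate(ctx, "sqlite3", datasource, coreMigrator, registryMigrator) +// if err != nil { +// return nil, err +// }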
-func ConnectAndMigrate(ctx context.Context, driver string, datasource string, migrator Migrator) (*sqlx.DB, error) { +func ConnectAndMigrate( + ctx context.Context, + driver string, + datasource string, + migrators ...Migrator, +) (*sqlx.DB, error) { dbx, err := Connect(ctx, driver, datasource) if err != nil { return nil, err } - if err = migrator(ctx, dbx); err != nil { - return nil, fmt.Errorf("failed to setup the db: %w", err) + for _, migrator := range migrators { + if err = migrator(ctx, dbx); err != nil { + return nil, fmt.Errorf("failed to setup the db: %w", err) + } } return dbx, nil diff --git a/store/database/util.go b/store/database/util.go index 2b3ad65fc..b66bcbe7d 100644 --- a/store/database/util.go +++ b/store/database/util.go @@ -60,6 +60,8 @@ func ProcessSQLErrorf(ctx context.Context, err error, format string, args ...int translatedError = store.ErrResourceNotFound case isSQLUniqueConstraintError(err): translatedError = store.ErrDuplicate + case isSQLForeignKeyViolationError(err): + translatedError = store.ErrForeignKeyViolation default: } diff --git a/store/database/util_no_sqlite.go b/store/database/util_no_sqlite.go index 3405713e0..577c6abbd 100644 --- a/store/database/util_no_sqlite.go +++ b/store/database/util_no_sqlite.go @@ -30,3 +30,12 @@ func isSQLUniqueConstraintError(original error) bool { return false } + +func isSQLForeignKeyViolationError(original error) bool { + var pqErr *pq.Error + if errors.As(original, &pqErr) { + return pqErr.Code == "23503" + } + + return false +} diff --git a/store/database/util_sqlite.go b/store/database/util_sqlite.go index 62fe04bba..b99ab951c 100644 --- a/store/database/util_sqlite.go +++ b/store/database/util_sqlite.go @@ -18,6 +18,7 @@ package database import ( + "github.com/jackc/pgerrcode" + "github.com/lib/pq" "github.com/mattn/go-sqlite3" "github.com/pkg/errors" @@ -37,3 +38,19 @@ func isSQLUniqueConstraintError(original error) bool { return false } + +func isSQLForeignKeyViolationError(original error) bool { + var sqliteErr sqlite3.Error + if errors.As(original, &sqliteErr) { + return errors.Is(sqliteErr.ExtendedCode, sqlite3.ErrConstraintForeignKey) + } + + var pqErr *pq.Error + // this can happen if the child manifest is deleted by + // the online GC while attempting to create the list + if errors.As(original, &pqErr) && pqErr.Code == pgerrcode.ForeignKeyViolation { + return true + } + + return false +} diff --git a/store/errors.go b/store/errors.go index 4e2588999..76d5f42b3 100644 --- a/store/errors.go +++ b/store/errors.go @@ -19,6 +19,7 @@ import "errors" var ( ErrResourceNotFound = errors.New("resource not found") ErrDuplicate = errors.New("resource is a duplicate") + ErrForeignKeyViolation = errors.New("foreign resource does not exist") ErrVersionConflict = errors.New("resource version conflict") ErrPathTooLong = errors.New("the path is too long") ErrPrimaryPathAlreadyExists = errors.New("primary path already exists for resource") diff --git a/types/config.go b/types/config.go index 73136c953..a38e48757 100644 --- a/types/config.go +++ b/types/config.go @@ -89,6 +89,10 @@ type Config struct { // (either running directly or via a port exposed in a docker container). // Value is derived from HTTP.Server unless explicitly specified (e.g. http://host.docker.internal:3000). Container string `envconfig:"GITNESS_URL_CONTAINER"` + + // Registry is used as a base to generate external facing URLs. + // Value is derived from Base unless explicitly specified (e.g. http://localhost:3000).
+ Registry string `envconfig:"GITNESS_REGISTRY_URL"` } // Git defines the git configuration parameters @@ -423,6 +427,61 @@ type Config struct { } } + Registry struct { + Enable bool `envconfig:"GITNESS_REGISTRY_ENABLED" default:"false"` + Storage struct { + // StorageType defines the type of storage to use for the registry. Options are: `filesystem`, `s3aws` + StorageType string `envconfig:"GITNESS_REGISTRY_STORAGE_TYPE" default:"filesystem"` + + // FileSystemStorage defines the configuration for the filesystem storage if StorageType is `filesystem`. + FileSystemStorage struct { + MaxThreads int `envconfig:"GITNESS_REGISTRY_FILESYSTEM_MAX_THREADS" default:"100"` + RootDirectory string `envconfig:"GITNESS_REGISTRY_FILESYSTEM_ROOT_DIRECTORY"` + } + + // S3Storage defines the configuration for the S3 storage if StorageType is `s3aws`. + S3Storage struct { + AccessKey string `envconfig:"GITNESS_REGISTRY_S3_ACCESS_KEY"` + SecretKey string `envconfig:"GITNESS_REGISTRY_S3_SECRET_KEY"` + Region string `envconfig:"GITNESS_REGISTRY_S3_REGION"` + RegionEndpoint string `envconfig:"GITNESS_REGISTRY_S3_REGION_ENDPOINT"` + ForcePathStyle bool `envconfig:"GITNESS_REGISTRY_S3_FORCE_PATH_STYLE" default:"true"` + Accelerate bool `envconfig:"GITNESS_REGISTRY_S3_ACCELERATED" default:"false"` + Bucket string `envconfig:"GITNESS_REGISTRY_S3_BUCKET"` + Encrypt bool `envconfig:"GITNESS_REGISTRY_S3_ENCRYPT" default:"false"` + KeyID string `envconfig:"GITNESS_REGISTRY_S3_KEY_ID"` + Secure bool `envconfig:"GITNESS_REGISTRY_S3_SECURE" default:"true"` + V4Auth bool `envconfig:"GITNESS_REGISTRY_S3_V4_AUTH" default:"true"` + ChunkSize int `envconfig:"GITNESS_REGISTRY_S3_CHUNK_SIZE" default:"10485760"` + MultipartCopyChunkSize int `envconfig:"GITNESS_REGISTRY_S3_MULTIPART_COPY_CHUNK_SIZE" default:"33554432"` + MultipartCopyMaxConcurrency int `envconfig:"GITNESS_REGISTRY_S3_MULTIPART_COPY_MAX_CONCURRENCY" default:"100"` + MultipartCopyThresholdSize int `envconfig:"GITNESS_REGISTRY_S3_MULTIPART_COPY_THRESHOLD_SIZE" default:"33554432"` //nolint:lll + RootDirectory string `envconfig:"GITNESS_REGISTRY_S3_ROOT_DIRECTORY"` + UseDualStack bool `envconfig:"GITNESS_REGISTRY_S3_USE_DUAL_STACK" default:"false"` + LogLevel string `envconfig:"GITNESS_REGISTRY_S3_LOG_LEVEL" default:"info"` + Delete bool `envconfig:"GITNESS_REGISTRY_S3_DELETE_ENABLED" default:"true"` + Redirect bool `envconfig:"GITNESS_REGISTRY_S3_STORAGE_REDIRECT" default:"false"` + } + } + + HTTP struct { + // GITNESS_REGISTRY_HTTP_SECRET is used to encrypt the upload session details during docker push. + // If not provided, a random secret will be generated. 
This may cause problems with uploads if multiple + // registries are behind a load-balancer + Secret string `envconfig:"GITNESS_REGISTRY_HTTP_SECRET"` + } + + //nolint:lll + GarbageCollection struct { + Enabled bool `envconfig:"GITNESS_REGISTRY_GARBAGE_COLLECTION_ENABLED" default:"false"` + NoIdleBackoff bool `envconfig:"GITNESS_REGISTRY_GARBAGE_COLLECTION_NO_IDLE_BACKOFF" default:"false"` + MaxBackoffDuration time.Duration `envconfig:"GITNESS_REGISTRY_GARBAGE_COLLECTION_MAX_BACKOFF_DURATION" default:"10m"` + InitialIntervalDuration time.Duration `envconfig:"GITNESS_REGISTRY_GARBAGE_COLLECTION_INITIAL_INTERVAL_DURATION" default:"5s"` //nolint:lll + TransactionTimeoutDuration time.Duration `envconfig:"GITNESS_REGISTRY_GARBAGE_COLLECTION_TRANSACTION_TIMEOUT_DURATION" default:"10s"` //nolint:lll + BlobsStorageTimeoutDuration time.Duration `envconfig:"GITNESS_REGISTRY_GARBAGE_COLLECTION_BLOB_STORAGE_TIMEOUT_DURATION" default:"5s"` //nolint:lll + } + } + Instrumentation struct { Enable bool `envconfig:"GITNESS_INSTRUMENTATION_ENABLE" default:"false"` Cron string `envconfig:"GITNESS_INSTRUMENTATION_CRON" default:"0 0 * * *"` diff --git a/types/enum/membership_role.go b/types/enum/membership_role.go index 671548288..33a48dd36 100644 --- a/types/enum/membership_role.go +++ b/types/enum/membership_role.go @@ -40,6 +40,8 @@ var membershipRoleReaderPermissions = slices.Clip(slices.Insert([]Permission{}, PermissionTemplateView, PermissionGitspaceView, PermissionInfraProviderView, + PermissionArtifactsDownload, + PermissionRegistryView, )) var membershipRoleExecutorPermissions = slices.Clip(slices.Insert(membershipRoleReaderPermissions, 0, @@ -50,11 +52,15 @@ var membershipRoleExecutorPermissions = slices.Clip(slices.Insert(membershipRole PermissionTemplateAccess, PermissionGitspaceAccess, PermissionInfraProviderAccess, + PermissionArtifactsUpload, )) var membershipRoleContributorPermissions = slices.Clip(slices.Insert(membershipRoleReaderPermissions, 0, PermissionRepoPush, PermissionRepoReview, + + PermissionArtifactsUpload, + PermissionArtifactsDelete, )) var membershipRoleSpaceOwnerPermissions = slices.Clip(slices.Insert(membershipRoleReaderPermissions, 0, @@ -93,6 +99,12 @@ var membershipRoleSpaceOwnerPermissions = slices.Clip(slices.Insert(membershipRo PermissionInfraProviderEdit, PermissionInfraProviderDelete, PermissionInfraProviderAccess, + + PermissionArtifactsUpload, + PermissionArtifactsDelete, + + PermissionRegistryEdit, + PermissionRegistryDelete, )) func init() { diff --git a/types/enum/permission.go b/types/enum/permission.go index 8df29077b..0735ed7a2 100644 --- a/types/enum/permission.go +++ b/types/enum/permission.go @@ -29,6 +29,7 @@ const ( ResourceTypeTemplate ResourceType = "TEMPLATE" ResourceTypeGitspace ResourceType = "GITSPACE" ResourceTypeInfraProvider ResourceType = "INFRAPROVIDER" + ResourceTypeRegistry ResourceType = "REGISTRY" ) // Permission represents the different types of permissions a principal can have. 
@@ -143,3 +144,21 @@ const ( PermissionInfraProviderDelete Permission = "infraprovider_delete" PermissionInfraProviderAccess Permission = "infraprovider_access" ) + +const ( + /* + ----- ARTIFACTS ----- + */ + PermissionArtifactsDownload Permission = "artifacts_download" + PermissionArtifactsUpload Permission = "artifacts_upload" + PermissionArtifactsDelete Permission = "artifacts_delete" +) + +const ( + /* + ----- REGISTRY ----- + */ + PermissionRegistryView Permission = "registry_view" + PermissionRegistryEdit Permission = "registry_edit" + PermissionRegistryDelete Permission = "registry_delete" +) diff --git a/web/.husky/pre-commit b/web/.husky/pre-commit new file mode 100755 index 000000000..70d516da0 --- /dev/null +++ b/web/.husky/pre-commit @@ -0,0 +1,5 @@ +#!/usr/bin/env sh +. "$(dirname -- "$0")/_/husky.sh" + +cd web +npx lint-staged diff --git a/web/.lintstagedrc.yml b/web/.lintstagedrc.yml new file mode 100644 index 000000000..d15c9b92f --- /dev/null +++ b/web/.lintstagedrc.yml @@ -0,0 +1,8 @@ +# Apply checks for artifact registry (ar) code +'*': 'sh src/ar/scripts/license/stamp.sh' +'src/ar/**/*.{ts,tsx,p1}': 'prettier --check' +'src/ar/**/*.{ts,tsx,p2}': 'eslint --rulesdir src/ar/scripts/eslint-rules' +'src/ar/**/*.{ts,tsx,p3}': 'sh src/ar/scripts/typecheck-staged.sh' +'src/ar/**/*.scss,p1': 'stylelint --max-warnings 0' +'src/ar/**/*.scss,p2': 'prettier --check' +'src/ar/**/*.i18n.ts': 'exit 1' diff --git a/web/config/moduleFederation.config.js b/web/config/moduleFederation.config.js index ab03697f3..f8c09bb56 100644 --- a/web/config/moduleFederation.config.js +++ b/web/config/moduleFederation.config.js @@ -54,7 +54,8 @@ module.exports = { './Search': './src/pages/Search/CodeSearchPage.tsx', './Labels': './src/pages/ManageSpace/ManageLabels/ManageLabels.tsx', './WebhookDetails': './src/pages/WebhookDetails/WebhookDetails.tsx', - './NewRepoModalButton': './src/components/NewRepoModalButton/NewRepoModalButton.tsx' + './NewRepoModalButton': './src/components/NewRepoModalButton/NewRepoModalButton.tsx', + './HAREnterpriseApp': './src/ar/app/EnterpriseApp.tsx' }, shared: { formik: packageJSON.dependencies['formik'], diff --git a/web/config/webpack.common.js b/web/config/webpack.common.js index 0569d97db..5d111a3d7 100644 --- a/web/config/webpack.common.js +++ b/web/config/webpack.common.js @@ -24,6 +24,8 @@ const MiniCssExtractPlugin = require('mini-css-extract-plugin') const HTMLWebpackPlugin = require('html-webpack-plugin') const TsconfigPathsPlugin = require('tsconfig-paths-webpack-plugin') const GenerateStringTypesPlugin = require('../scripts/webpack/GenerateStringTypesPlugin').GenerateStringTypesPlugin +const GenerateArStringTypesPlugin = + require('../src/ar/scripts/webpack/GenerateArStringTypesPlugin').GenerateArStringTypesPlugin const { RetryChunkLoadPlugin } = require('webpack-retry-chunk-load-plugin') const MonacoWebpackPlugin = require('monaco-editor-webpack-plugin') const moduleFederationConfig = require('./moduleFederation.config') @@ -233,6 +235,7 @@ module.exports = { __DEV__: DEV }), new GenerateStringTypesPlugin(), + new GenerateArStringTypesPlugin(), new RetryChunkLoadPlugin({ maxRetries: 5 }), diff --git a/web/cypress/cypress.config.js b/web/cypress/cypress.config.ts similarity index 57% rename from web/cypress/cypress.config.js rename to web/cypress/cypress.config.ts index cef283e96..bde499f4d 100644 --- a/web/cypress/cypress.config.js +++ b/web/cypress/cypress.config.ts @@ -1,3 +1,10 @@ +/* + * Copyright 2024 Harness Inc. All rights reserved. 
+ * Use of this source code is governed by the PolyForm Shield 1.0.0 license + * that can be found in the licenses directory at the root of this repository, also available at + * https://polyformproject.org/wp-content/uploads/2020/06/PolyForm-Shield-1.0.0.txt. + */ + import { defineConfig } from 'cypress' export default defineConfig({ diff --git a/web/cypress/integration/ar/registry/docker.spec.ts b/web/cypress/integration/ar/registry/docker.spec.ts new file mode 100644 index 000000000..caeadf919 --- /dev/null +++ b/web/cypress/integration/ar/registry/docker.spec.ts @@ -0,0 +1,181 @@ +/* + * Copyright 2024 Harness Inc. All rights reserved. + * Use of this source code is governed by the PolyForm Shield 1.0.0 license + * that can be found in the licenses directory at the root of this repository, also available at + * https://polyformproject.org/wp-content/uploads/2020/06/PolyForm-Shield-1.0.0.txt. + */ + +import { getRandomNameByType } from '../../../utils/getRandomNameByType' + +function selectOptions(name: string) { + cy.contains(name) + .should('be.visible') + .parent() + .parent() + .parent() + .parent() + .get('span[data-icon=Options]') + .should('be.visible') + .click() +} + +describe('Docker registry e2e', () => { + const projectName = getRandomNameByType('project') + const registryName = getRandomNameByType('registry') + const upstreamProxyNameWithDockerHubSource = getRandomNameByType('upstreamProxy') + const upstreamProxyNameWithCustomSource = getRandomNameByType('upstreamProxy') + const artifactName = 'alpine' + const artifactVersion = 'latest' + + beforeEach(() => { + cy.login() + cy.intercept({ method: 'GET', url: 'api/v1/spaces/*/registries?*' }).as('getRegistries') + cy.intercept({ method: 'GET', url: 'api/v1/registry/*' }).as('getRegistry') + cy.intercept({ method: 'GET', url: 'api/v1/spaces/*/artifacts?*' }).as('getArtifacts') + cy.intercept({ method: 'POST', url: 'api/v1/registry?*' }).as('createRegistry') + cy.intercept({ method: 'PUT', url: 'api/v1/registry/*' }).as('updateRegistry') + cy.intercept({ method: 'DELETE', url: 'api/v1/registry/*' }).as('deleteRegistry') + }) + + it('should create registry without any error', () => { + cy.createProject(projectName) + cy.navigateToRegistries(projectName) + cy.wait('@getRegistries').its('response.statusCode').should('equal', 200) + cy.get('div[data-testid="page-subheader"]').contains('New Artifact Registry').should('be.visible').click() + cy.get('.bp3-dialog').within(() => { + cy.contains('Create a New Artifact Registry').should('be.visible') + + cy.get('div[class*="ThumbnailSelect-"]').contains('Docker').should('be.visible').click() + + cy.get('input[name="identifier"]').focus().clear().type(registryName) + + cy.get('span[data-testid="description-edit"]').should('be.visible').click().wait(500) + cy.get('textarea[name="description"]').focus().type('created from cypress automation') + + cy.get('span[data-testid="tags-edit"]').should('be.visible').click().wait(500) + cy.get('.bp3-tag-input input').focus().clear().type('test{enter}test2{enter}test3{enter}') + + cy.get('button[type="submit"]').click() + cy.wait('@createRegistry').its('response.statusCode').should('equal', 201) + cy.wait('@getRegistry').its('response.statusCode').should('equal', 200) + }) + }) + + it('should show newly created registry in table and should update details without any error', () => { + cy.navigateToRegistries(projectName) + cy.wait('@getRegistries').its('response.statusCode').should('equal', 200) + cy.contains(registryName).should('be.visible') + +
cy.get('input[placeholder="Search"]').focus().clear().type(registryName) + cy.wait('@getRegistries').its('response.statusCode').should('equal', 200) + cy.contains(registryName).should('be.visible').click() + + cy.wait('@getRegistry').its('response.statusCode').should('equal', 200) + cy.wait('@getArtifacts').its('response.statusCode').should('equal', 200) + cy.get('div[role="tablist"]').contains('Configuration').should('be.visible').click() + + cy.get('button[aria-label="Save"]').should('be.visible').should('be.disabled') + + cy.get('textarea[name="description"]').focus().clear() + cy.get('span[data-testid="description-edit"]').should('be.visible').click().wait(500) + cy.get('textarea[name="description"]').focus().type('updated description from cypress automation') + + cy.get('.bp3-tag-input input').focus().clear().type('{backspace}{backspace}test4{enter}test5{enter}') + cy.get('button[aria-label="Save"]').should('be.visible').should('not.be.disabled').click() + + cy.wait('@updateRegistry').its('response.statusCode').should('equal', 200) + cy.get('.bp3-toast-message').contains('Registry updated successfully') + }) + + it('should upload artifacts to newly created registry ', () => { + cy.executeScript({ + script: 'e2e/registry/docker.sh', + params: `--space_ref ${projectName} --registry ${registryName} --artifact ${artifactName} --version ${artifactVersion}` + }).then(scriptId => { + cy.log('scriptId', scriptId) + cy.pollExecutionApi(scriptId).its('status').should('equal', 'completed') + }) + cy.navigateToRegistry(projectName, registryName) + cy.wait(3000) + }) + + it('should be able to view artifacts inside registry', () => { + cy.validateDockerArtifacts(projectName, registryName, artifactName, artifactVersion) + }) + + it('should be able to add upstream proxy in registry', () => { + cy.navigateToRegistry(projectName, registryName, 'configuration') + cy.contains('Advanced (Optional)').should('be.visible').click() + cy.get('.bp3-card').contains('Upstream Proxies') + cy.get('button[aria-label="Configure Upstream"]').should('be.visible').click() + cy.wait('@getRegistries').its('response.statusCode').should('equal', 200) + + // create upstream proxy with dockerhub source + cy.get('button[aria-label="New Upstream Proxy"]').should('be.visible').click() + cy.get('.bp3-dialog').within(() => { + cy.contains('Create a New Upstream Proxy').should('be.visible') + cy.get('input[type=checkbox][name=packageType][value=DOCKER]').should('be.checked').should('be.disabled') + cy.get('input[name="identifier"]').focus().clear().type(upstreamProxyNameWithDockerHubSource) + cy.get('input[type=radio][name="config.source"][value=Dockerhub]').should('be.checked') + cy.get('input[type=radio][name="config.authType"][value=Anonymous]').should('be.checked') + cy.get('button[type=submit]').should('be.visible').click() + }) + cy.wait('@createRegistry').its('response.statusCode').should('equal', 201) + cy.wait('@getRegistries').its('response.statusCode').should('equal', 200) + + // create upstream proxy with custom source + cy.get('button[aria-label="New Upstream Proxy"]').should('be.visible').click() + cy.get('.bp3-dialog').within(() => { + cy.contains('Create a New Upstream Proxy').should('be.visible') + cy.get('input[type=checkbox][name=packageType][value=DOCKER]').should('be.checked').should('be.disabled') + cy.get('input[name="identifier"]').focus().clear().type(upstreamProxyNameWithCustomSource) + cy.get('input[type=radio][name="config.source"][value=Dockerhub]').should('be.checked') +
cy.get('input[type=radio][name="config.source"][value=Custom]').should('not.be.disabled').click({ force: true }) + cy.get('input[name="config.url"]').focus().clear().type('https://registry-1.docker.io') + cy.get('input[type=radio][name="config.authType"][value=Anonymous]').should('be.checked') + cy.get('button[type=submit]').should('be.visible').click() + }) + cy.wait('@createRegistry').its('response.statusCode').should('equal', 201) + cy.wait('@getRegistries').its('response.statusCode').should('equal', 200) + + cy.get('ul[aria-label=selectable-list]') + .should('be.visible') + .within(() => { + cy.contains(upstreamProxyNameWithDockerHubSource).should('be.visible').click() + cy.contains(upstreamProxyNameWithCustomSource).should('be.visible').click() + }) + cy.get('ul[aria-label=orderable-list]') + .should('be.visible') + .within(() => { + cy.contains(upstreamProxyNameWithDockerHubSource).should('be.visible').click() + cy.contains(upstreamProxyNameWithCustomSource).should('be.visible').click() + }) + cy.get('button[aria-label="Save"]').should('be.visible').should('not.be.disabled').click() + + cy.wait('@updateRegistry').its('response.statusCode').should('equal', 200) + cy.get('.bp3-toast-message').contains('Registry updated successfully') + }) + + it('should show newly created registry in table and be able to delete without any error', () => { + cy.navigateToRegistries(projectName) + cy.wait('@getRegistries').its('response.statusCode').should('equal', 200) + + cy.get('input[placeholder="Search"]').focus().clear().type(registryName) + cy.wait('@getRegistries').its('response.statusCode').should('equal', 200) + + cy.contains(registryName).should('be.visible') + selectOptions(registryName) + cy.get('.bp3-menu-item').contains('Delete').should('be.visible').click() + cy.get('.bp3-dialog') + .should('be.visible') + .within(() => { + cy.contains('Delete Registry').should('be.visible') + cy.get('button[aria-label=Delete]').should('be.visible').should('not.be.disabled').click() + cy.wait('@deleteRegistry').its('response.statusCode').should('equal', 200) + }) + }) + + after(() => { + cy.deleteProject(projectName) + }) +}) diff --git a/web/cypress/integration/ar/registry/helm.spec.ts b/web/cypress/integration/ar/registry/helm.spec.ts new file mode 100644 index 000000000..7179a59c4 --- /dev/null +++ b/web/cypress/integration/ar/registry/helm.spec.ts @@ -0,0 +1,165 @@ +/* + * Copyright 2024 Harness Inc. All rights reserved. + * Use of this source code is governed by the PolyForm Shield 1.0.0 license + * that can be found in the licenses directory at the root of this repository, also available at + * https://polyformproject.org/wp-content/uploads/2020/06/PolyForm-Shield-1.0.0.txt.
+ */ + +import { getRandomNameByType } from '../../../utils/getRandomNameByType' + +function selectOptions(name: string) { + cy.contains(name) + .should('be.visible') + .parent() + .parent() + .parent() + .parent() + .get('span[data-icon=Options]') + .should('be.visible') + .click() +} + +describe('Helm registry e2e', () => { + const projectName = getRandomNameByType('project', '-') + const registryName = getRandomNameByType('registry', '-') + const upstreamProxyNameWithCustomSource = getRandomNameByType('upstreamProxy', '-') + const artifactName = 'harness-delegate/harness-delegate-ng' + const searchByArtifactName = 'harness-delegate-ng' + const artifactVersion = '1.0.19' + + beforeEach(() => { + cy.login() + cy.intercept({ method: 'GET', url: 'api/v1/spaces/*/registries?*' }).as('getRegistries') + cy.intercept({ method: 'GET', url: 'api/v1/registry/*' }).as('getRegistry') + cy.intercept({ method: 'GET', url: 'api/v1/spaces/*/artifacts?*' }).as('getArtifacts') + cy.intercept({ method: 'POST', url: 'api/v1/registry?*' }).as('createRegistry') + cy.intercept({ method: 'PUT', url: 'api/v1/registry/*' }).as('updateRegistry') + cy.intercept({ method: 'DELETE', url: 'api/v1/registry/*' }).as('deleteRegistry') + }) + + it('should create registry without any error', () => { + cy.createProject(projectName) + cy.navigateToRegistries(projectName) + cy.wait('@getRegistries').its('response.statusCode').should('equal', 200) + cy.get('div[data-testid="page-subheader"]').contains('New Artifact Registry').should('be.visible').click() + cy.get('.bp3-dialog').within(() => { + cy.contains('Create a New Artifact Registry').should('be.visible') + + cy.get('div[class*="ThumbnailSelect-"]').contains('Helm').should('be.visible').click() + + cy.get('input[name="identifier"]').focus().clear().type(registryName) + + cy.get('span[data-testid="description-edit"]').should('be.visible').click().wait(500) + cy.get('textarea[name="description"]').focus().type('created from cypress automation') + + cy.get('span[data-testid="tags-edit"]').should('be.visible').click().wait(500) + cy.get('.bp3-tag-input input').focus().clear().type('test{enter}test2{enter}test3{enter}') + + cy.get('button[type="submit"]').click() + cy.wait('@createRegistry').its('response.statusCode').should('equal', 201) + cy.wait('@getRegistry').its('response.statusCode').should('equal', 200) + }) + }) + + it('should show newly created registry in table and should update details without any error', () => { + cy.navigateToRegistries(projectName) + cy.wait('@getRegistries').its('response.statusCode').should('equal', 200) + cy.contains(registryName).should('be.visible') + + cy.get('input[placeholder="Search"]').focus().clear().type(registryName) + cy.wait('@getRegistries').its('response.statusCode').should('equal', 200) + cy.contains(registryName).should('be.visible').click() + + cy.wait('@getRegistry').its('response.statusCode').should('equal', 200) + cy.wait('@getArtifacts').its('response.statusCode').should('equal', 200) + cy.get('div[role="tablist"]').contains('Configuration').should('be.visible').click() + + cy.get('button[aria-label="Save"]').should('be.visible').should('be.disabled') + + cy.get('textarea[name="description"]').focus().clear() + cy.get('span[data-testid="description-edit"]').should('be.visible').click().wait(500) + cy.get('textarea[name="description"]').focus().type('updated description from cypress automation') + + cy.get('.bp3-tag-input input').focus().clear().type('{backspace}{backspace}test4{enter}test5{enter}') +
cy.get('button[aria-label="Save"]').should('be.visible').should('not.be.disabled').click() + + cy.wait('@updateRegistry').its('response.statusCode').should('equal', 200) + cy.get('.bp3-toast-message').contains('Registry updated successfully') + }) + + it('should upload artifacts to newly created registry ', () => { + cy.executeScript({ + script: 'e2e/registry/helm.sh', + params: `--space_ref ${projectName} --registry ${registryName} --artifact ${artifactName} --version ${artifactVersion}` + }).then(scriptId => { + cy.log('scriptId', scriptId) + cy.pollExecutionApi(scriptId).its('status').should('equal', 'completed') + }) + cy.navigateToRegistry(projectName, registryName) + cy.wait(3000) + }) + + it('should be able to view artifacts inside registry', () => { + cy.validateHelmArtifacts(projectName, registryName, searchByArtifactName, artifactVersion) + }) + + it('should be able to add upstream proxy in registry', () => { + cy.navigateToRegistry(projectName, registryName, 'configuration') + cy.contains('Advanced (Optional)').should('be.visible').click() + cy.get('.bp3-card').contains('Upstream Proxies') + cy.get('button[aria-label="Configure Upstream"]').should('be.visible').click() + cy.wait('@getRegistries').its('response.statusCode').should('equal', 200) + + // create upstream proxy with custom source + cy.get('button[aria-label="New Upstream Proxy"]').should('be.visible').click() + cy.get('.bp3-dialog').within(() => { + cy.contains('Create a New Upstream Proxy').should('be.visible') + cy.get('input[type=checkbox][name=packageType][value=HELM]').should('be.checked').should('be.disabled') + cy.get('input[name="identifier"]').focus().clear().type(upstreamProxyNameWithCustomSource) + cy.get('input[name="config.url"]').focus().clear().type('https://registry-1.docker.io') + cy.get('input[type=radio][name="config.authType"][value=Anonymous]').should('be.checked') + cy.get('button[type=submit]').should('be.visible').click() + }) + cy.wait('@createRegistry').its('response.statusCode').should('equal', 201) + cy.wait('@getRegistries').its('response.statusCode').should('equal', 200) + + // select upstream proxy from list + cy.get('ul[aria-label=selectable-list]') + .should('be.visible') + .within(() => { + cy.contains(upstreamProxyNameWithCustomSource).should('be.visible').click() + }) + cy.get('ul[aria-label=orderable-list]') + .should('be.visible') + .within(() => { + cy.contains(upstreamProxyNameWithCustomSource).should('be.visible').click() + }) + cy.get('button[aria-label="Save"]').should('be.visible').should('not.be.disabled').click() + + cy.wait('@updateRegistry').its('response.statusCode').should('equal', 200) + cy.get('.bp3-toast-message').contains('Registry updated successfully') + }) + + it('should show newly created registry in table and be able to delete without any error', () => { + cy.navigateToRegistries(projectName) + cy.wait('@getRegistries').its('response.statusCode').should('equal', 200) + cy.contains(registryName).should('be.visible') + + cy.get('input[placeholder="Search"]').focus().clear().type(registryName) + cy.wait('@getRegistries').its('response.statusCode').should('equal', 200) + + selectOptions(registryName) + cy.get('.bp3-menu-item').contains('Delete').should('be.visible').click() + cy.get('.bp3-dialog') + .should('be.visible') + .within(() => { + cy.contains('Delete Registry').should('be.visible') + cy.get('button[aria-label=Delete]').should('be.visible').should('not.be.disabled').click() + cy.wait('@deleteRegistry').its('response.statusCode').should('equal', 200) + }) + }) +
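+ // Teardown: cy.deleteProject (like cy.login and cy.createProject above) is presumably a custom command registered in the suite's Cypress support files.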
after(() => { + cy.deleteProject(projectName) + }) +}) diff --git a/web/cypress/integration/ar/upstream/upstream.docker.spec.ts b/web/cypress/integration/ar/upstream/upstream.docker.spec.ts new file mode 100644 index 000000000..9731352f7 --- /dev/null +++ b/web/cypress/integration/ar/upstream/upstream.docker.spec.ts @@ -0,0 +1,169 @@ +/* + * Copyright 2024 Harness, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +import { getRandomNameByType } from '../../../utils/getRandomNameByType' + +describe('Docker upstream registry e2e', () => { + const projectName = getRandomNameByType('project') + const virtualRegistry = getRandomNameByType('registry') + const registryNameWithDockerHubSource = getRandomNameByType('registry') + const registryNameWithCustomSource = getRandomNameByType('registry') + + const artifactName = 'alpine' + const artifactVersion = 'latest' + const upstreamURL = 'duflee.com' + const upstreamProject = 'ar-test' + const upstreamUsername = 'admin' + const upstreamPassword = 'Harbor12345' + + beforeEach(() => { + cy.login() + cy.intercept({ method: 'GET', url: 'api/v1/spaces/*/registries?*' }).as('getRegistries') + cy.intercept({ method: 'GET', url: 'api/v1/registry/*' }).as('getRegistry') + cy.intercept({ method: 'POST', url: 'api/v1/registry?*' }).as('createRegistry') + cy.intercept({ method: 'PUT', url: 'api/v1/registry/*' }).as('updateRegistry') + cy.intercept({ method: 'DELETE', url: 'api/v1/registry/*' }).as('deleteRegistry') + cy.intercept({ method: 'POST', url: 'api/v1/secrets' }).as('createSecret') + }) + + it('should create registry with docker hub source', () => { + cy.createProject(projectName) + cy.navigateToRegistries(projectName) + cy.wait('@getRegistries').its('response.statusCode').should('equal', 200) + cy.get('div[data-testid="page-subheader"] button[class*="SplitButton--dropdown--"]').should('be.visible').click() + cy.get('.bp3-popover') + .should('be.visible') + .within(() => { + cy.contains('Upstream Proxy').should('be.visible').click() + }) + cy.get('.bp3-dialog') + .should('be.visible') + .within(() => { + cy.contains('Create a New Upstream Proxy').should('be.visible') + cy.get('input[type=checkbox][name=packageType][value=DOCKER]').should('be.checked') + cy.get('input[name="identifier"]').focus().clear().type(registryNameWithDockerHubSource) + cy.get('input[type=radio][name="config.source"][value=Dockerhub]').should('be.checked') + cy.get('input[type=radio][name="config.authType"][value=Anonymous]').should('be.checked') + cy.get('button[type=submit]').should('be.visible').click() + }) + cy.wait('@createRegistry').its('response.statusCode').should('equal', 201) + }) + + it('should create registry with custom source', () => { + cy.navigateToRegistries(projectName) + cy.wait('@getRegistries').its('response.statusCode').should('equal', 200) + cy.get('div[data-testid="page-subheader"] button[class*="SplitButton--dropdown--"]').should('be.visible').click() + cy.get('.bp3-popover') + .should('be.visible') + .within(() => { + 
cy.contains('Upstream Proxy').should('be.visible').click() + }) + cy.get('.bp3-dialog') + .should('be.visible') + .within(() => { + cy.contains('Create a New Upstream Proxy').should('be.visible') + cy.get('input[type=checkbox][name=packageType][value=DOCKER]').should('be.checked') + cy.get('input[name="identifier"]').focus().clear().type(registryNameWithCustomSource) + cy.get('input[type=radio][name="config.source"][value=Dockerhub]').should('be.checked') + cy.get('input[type=radio][name="config.source"][value=Custom]').should('not.be.disabled').click({ force: true }) + cy.get('input[name="config.url"]').focus().clear().type(`https://${upstreamURL}`) + cy.get('input[type=radio][name="config.authType"][value=Anonymous]').should('be.checked') + cy.get('input[type=radio][name="config.authType"][value=UserPassword]').click({ force: true }) + cy.get('input[name="config.auth.userName"]').scrollIntoView().should('be.visible') + cy.get('input[name="config.auth.userName"]').scrollIntoView().focus().clear().type(upstreamUsername) + cy.get('button[aria-label="New Secret"]').should('be.visible').click() + }) + cy.get('.bp3-dialog .bp3-dialog-header') + .contains('Create a secret') + .parent() + .parent() + .parent() + .within(() => { + cy.get('input[name="name"]').focus().clear().type(upstreamPassword) + cy.get('textarea[name="value"]').focus().clear().type(upstreamPassword) + cy.get('input[name="description"]').focus().clear().type('Created from cypress automation') + cy.get('button[aria-label="Create Secret"]').should('be.visible').click() + cy.wait('@createSecret').its('response.statusCode').should('equal', 201) + }) + + cy.get('.bp3-dialog').within(() => { + cy.get('button[type=submit]').should('be.visible').click() + }) + cy.wait('@createRegistry').its('response.statusCode').should('equal', 201) + }) + + it('should be able to link upstream proxy to registry', () => { + cy.createRegistry(projectName, virtualRegistry, 'DOCKER', 'VIRTUAL') + cy.navigateToRegistries(projectName) + + cy.get('input[placeholder="Search"]').focus().clear().type(virtualRegistry) + cy.wait('@getRegistries').its('response.statusCode').should('equal', 200) + cy.contains(virtualRegistry).should('be.visible') + + cy.navigateToRegistry(projectName, virtualRegistry, 'configuration') + + cy.contains('Advanced (Optional)').should('be.visible').click() + cy.get('.bp3-card').contains('Upstream Proxies') + cy.get('button[aria-label="Configure Upstream"]').should('be.visible').click() + cy.wait('@getRegistries').its('response.statusCode').should('equal', 200) + + cy.get('ul[aria-label=selectable-list]') + .should('be.visible') + .within(() => { + cy.contains(registryNameWithCustomSource).should('be.visible').click() + cy.contains(registryNameWithDockerHubSource).should('be.visible').click() + }) + cy.get('ul[aria-label=orderable-list]') + .should('be.visible') + .within(() => { + cy.contains(registryNameWithCustomSource).should('be.visible').click() + cy.contains(registryNameWithDockerHubSource).should('be.visible').click() + }) + cy.get('button[aria-label="Save"]').should('be.visible').should('not.be.disabled').click() + + cy.wait('@updateRegistry').its('response.statusCode').should('equal', 200) + cy.get('.bp3-toast-message').contains('Registry updated successfully') + }) + + it('should be able to fetch the image from upstream proxy', () => { + cy.executeScript({ + script: 'e2e/upstream/docker.sh', + params: `--space_ref ${projectName} --registry ${virtualRegistry} --artifact ${artifactName} --version ${artifactVersion} --upstream_url 
${upstreamURL} --upstream_project ${upstreamProject}` + }).then(scriptId => { + cy.log('scriptId', scriptId) + cy.pollExecutionApi(scriptId).its('status').should('equal', 'completed') + }) + // upstream proxy artifacts take about a minute to reflect on the UI, so wait before navigating + cy.wait(20000) + cy.navigateToRegistry(projectName, registryNameWithCustomSource) + }) + + it('should be able to view artifacts inside upstream registry', () => { + cy.validateDockerArtifacts( + projectName, + registryNameWithCustomSource, + `${upstreamProject}/${artifactName}`, + artifactVersion + ) + }) + + after(() => { + cy.deleteRegistry(projectName, virtualRegistry) + cy.deleteRegistry(projectName, registryNameWithDockerHubSource) + cy.deleteRegistry(projectName, registryNameWithCustomSource) + cy.deleteProject(projectName) + }) +}) diff --git a/web/cypress/integration/ar/upstream/upstream.helm.spec.ts b/web/cypress/integration/ar/upstream/upstream.helm.spec.ts new file mode 100644 index 000000000..8f55ba90b --- /dev/null +++ b/web/cypress/integration/ar/upstream/upstream.helm.spec.ts @@ -0,0 +1,148 @@ +/* + * Copyright 2024 Harness, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +import { getRandomNameByType } from '../../../utils/getRandomNameByType' + +describe('Helm upstream registry e2e', () => { + const virtualRegistry = getRandomNameByType('registry', '-') + const projectName = getRandomNameByType('project', '-') + const registryNameWithCustomSource = getRandomNameByType('registry', '-') + + const artifactName = 'harness-delegate/harness-delegate-ng' + const searchByArtifactName = 'harness-delegate-ng' + const artifactVersion = '1.0.19' + + const upstreamURL = 'duflee.com' + const upstreamProject = 'ar-test' + const upstreamUsername = 'admin' + const upstreamPassword = 'Harbor12345' + + beforeEach(() => { + cy.login() + cy.intercept({ method: 'GET', url: 'api/v1/spaces/*/registries?*' }).as('getRegistries') + cy.intercept({ method: 'GET', url: 'api/v1/registry/*' }).as('getRegistry') + cy.intercept({ method: 'POST', url: 'api/v1/registry?*' }).as('createRegistry') + cy.intercept({ method: 'PUT', url: 'api/v1/registry/*' }).as('updateRegistry') + cy.intercept({ method: 'DELETE', url: 'api/v1/registry/*' }).as('deleteRegistry') + cy.intercept({ method: 'POST', url: 'api/v1/secrets' }).as('createSecret') + }) + + it('should create registry with custom source', () => { + cy.createProject(projectName) + cy.navigateToRegistries(projectName) + cy.wait('@getRegistries').its('response.statusCode').should('equal', 200) + cy.get('div[data-testid="page-subheader"] button[class*="SplitButton--dropdown--"]').should('be.visible').click() + cy.get('.bp3-popover') + .should('be.visible') + .within(() => { + cy.contains('Upstream Proxy').should('be.visible').click() + }) + + // create upstream proxy modal + cy.get('.bp3-dialog') + .should('be.visible') + .within(() => { + cy.contains('Create a New Upstream Proxy').should('be.visible') + cy.get('button[type=submit]').should('be.visible').click() +
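+ // Note: the submit click above lands on the still-empty form; presumably this exercises validation before the fields below are filled.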
diff --git a/web/cypress/integration/ar/upstream/upstream.helm.spec.ts b/web/cypress/integration/ar/upstream/upstream.helm.spec.ts new file mode 100644 index 000000000..8f55ba90b --- /dev/null +++ b/web/cypress/integration/ar/upstream/upstream.helm.spec.ts @@ -0,0 +1,148 @@ +/* + * Copyright 2024 Harness, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +import { getRandomNameByType } from '../../../utils/getRandomNameByType' + +describe('Helm upstream registry e2e', () => { + const virtualRegistry = getRandomNameByType('registry', '-') + const projectName = getRandomNameByType('project', '-') + const registryNameWithCustomSource = getRandomNameByType('registry', '-') + + const artifactName = 'harness-delegate/harness-delegate-ng' + const searchByArtifactName = 'harness-delegate-ng' + const artifactVersion = '1.0.19' + + const upstreamURL = 'duflee.com' + const upstreamProject = 'ar-test' + const upstreamUsername = 'admin' + const upstreamPassword = 'Harbor12345' + + beforeEach(() => { + cy.login() + cy.intercept({ method: 'GET', url: 'api/v1/spaces/*/registries?*' }).as('getRegistries') + cy.intercept({ method: 'GET', url: 'api/v1/registry/*' }).as('getRegistry') + cy.intercept({ method: 'POST', url: 'api/v1/registry?*' }).as('createRegistry') + cy.intercept({ method: 'PUT', url: 'api/v1/registry/*' }).as('updateRegistry') + cy.intercept({ method: 'DELETE', url: 'api/v1/registry/*' }).as('deleteRegistry') + cy.intercept({ method: 'POST', url: 'api/v1/secrets' }).as('createSecret') + }) + + it('should create registry with custom source', () => { + cy.createProject(projectName) + cy.navigateToRegistries(projectName) + cy.wait('@getRegistries').its('response.statusCode').should('equal', 200) + cy.get('div[data-testid="page-subheader"] button[class*="SplitButton--dropdown--"]').should('be.visible').click() + cy.get('.bp3-popover') + .should('be.visible') + .within(() => { + cy.contains('Upstream Proxy').should('be.visible').click() + }) + + // create upstream proxy modal + cy.get('.bp3-dialog') + .should('be.visible') + .within(() => { + cy.contains('Create a New Upstream Proxy').should('be.visible') + cy.get('button[type=submit]').should('be.visible').click() + cy.get('input[type=checkbox][name=packageType][value=HELM]').click({ force: true }) + cy.get('input[name="identifier"]').focus().clear().type(registryNameWithCustomSource) + cy.get('input[name="config.url"]').focus().clear().type(`https://${upstreamURL}`) + cy.get('input[type=radio][name="config.authType"][value=Anonymous]').should('be.checked') + cy.get('input[type=radio][name="config.authType"][value=UserPassword]').click({ force: true }) + cy.get('input[name="config.auth.userName"]').scrollIntoView().should('be.visible') + cy.get('input[name="config.auth.userName"]').scrollIntoView().focus().clear().type(upstreamUsername) + cy.get('button[aria-label="New Secret"]').should('be.visible').click() + }) + + // create secret + cy.get('.bp3-dialog .bp3-dialog-header') + .contains('Create a secret') + .parent() + .parent() + .parent() + .within(() => { + cy.get('input[name="name"]').focus().clear().type(upstreamPassword) + cy.get('textarea[name="value"]').focus().clear().type(upstreamPassword) + cy.get('input[name="description"]').focus().clear().type('Created from cypress automation') + cy.get('button[aria-label="Create Secret"]').should('be.visible').click() + cy.wait('@createSecret').its('response.statusCode').should('equal', 201) + }) + + cy.get('.bp3-dialog').within(() => { + cy.get('button[type=submit]').should('be.visible').click() + }) + cy.wait('@createRegistry').its('response.statusCode').should('equal', 201) + }) + + it('should be able to link upstream proxy to registry', () => { + cy.createRegistry(projectName, virtualRegistry, 'HELM', 'VIRTUAL') + cy.navigateToRegistries(projectName) + + cy.get('input[placeholder="Search"]').focus().clear().type(virtualRegistry) + cy.wait('@getRegistries').its('response.statusCode').should('equal', 200) + cy.contains(virtualRegistry).should('be.visible') + + cy.navigateToRegistry(projectName, virtualRegistry, 'configuration') + + cy.contains('Advanced (Optional)').should('be.visible').click() + cy.get('.bp3-card').contains('Upstream Proxies') + cy.get('button[aria-label="Configure Upstream"]').should('be.visible').click() + cy.wait('@getRegistries').its('response.statusCode').should('equal', 200) + + cy.get('ul[aria-label=selectable-list]') + .should('be.visible') + .within(() => { + cy.contains(registryNameWithCustomSource).should('be.visible').click() + }) + cy.get('ul[aria-label=orderable-list]') + .should('be.visible') + .within(() => { + cy.contains(registryNameWithCustomSource).should('be.visible').click() + }) + cy.get('button[aria-label="Save"]').should('be.visible').should('not.be.disabled').click() + + cy.wait('@updateRegistry').its('response.statusCode').should('equal', 200) + cy.get('.bp3-toast-message').contains('Registry updated successfully') + }) + + it('should upload artifacts to newly created registry', () => { + cy.executeScript({ + script: 'e2e/upstream/helm.sh', + params: `--space_ref ${projectName} --registry ${virtualRegistry} --artifact ${artifactName} --version ${artifactVersion} --upstream_url ${upstreamURL} --upstream_project ${upstreamProject}` + }).then(scriptId => { + cy.log('scriptId', scriptId) + cy.pollExecutionApi(scriptId).its('status').should('equal', 'completed') + }) + // upstream proxy artifacts can take up to a minute to reflect on the UI, so wait before navigating + cy.wait(20000) + cy.navigateToRegistry(projectName, virtualRegistry) + }) + + it('should be able to view artifacts inside registry', () => { + cy.validateHelmArtifacts( + projectName, + registryNameWithCustomSource, + `${upstreamProject}/${searchByArtifactName}`, + artifactVersion + ) + })
+ + after(() => { + cy.deleteRegistry(projectName, virtualRegistry) + cy.deleteRegistry(projectName, registryNameWithCustomSource) + cy.deleteProject(projectName) + }) +}) diff --git a/web/cypress/package.json b/web/cypress/package.json index cfd997b61..ebff532b1 100644 --- a/web/cypress/package.json +++ b/web/cypress/package.json @@ -3,11 +3,24 @@ "private": "true", "scripts": { "cypress:open": "cypress open", - "server": "node server.js" + "serve": "node server.js", + "serve:ar": "node dist/server/index.js", + "build:ar": "tsc", + "dev": "nodemon --watch 'server/**/*.ts' --exec 'ts-node' server/index.ts" }, "main": "index.js", "dependencies": { - "cypress": "10.8.0", + "@testing-library/cypress": "^8.0.3", + "child_process": "^1.0.2", + "cypress": "^11.1.0", + "express": "^4.19.2", + "typescript": "^4.8.4", "wait-on": "^6.0.1" + }, + "devDependencies": { + "@types/express": "^4.17.21", + "@types/node": "^20.14.10", + "nodemon": "^3.1.4", + "ts-node": "^10.9.2" } } diff --git a/web/cypress/scripts/e2e/registry/docker.sh b/web/cypress/scripts/e2e/registry/docker.sh new file mode 100644 index 000000000..ac0c96a25 --- /dev/null +++ b/web/cypress/scripts/e2e/registry/docker.sh @@ -0,0 +1,63 @@ +#!/bin/bash +# Copyright 2024 Harness Inc. All rights reserved. +# Use of this source code is governed by the PolyForm Shield 1.0.0 license +# that can be found in the licenses directory at the root of this repository, also available at +# https://polyformproject.org/wp-content/uploads/2020/06/PolyForm-Shield-1.0.0.txt. + +# Extract named variables from arguments + +while [ "$1" != "" ]; do + case $1 in + --space_ref ) shift + SPACE_REF=$1 + ;; + --registry ) shift + REGISTRY=$1 + ;; + --artifact ) shift + ARTIFACT=$1 + ;; + --version ) shift + VERSION=$1 + ;; + * ) echo "Invalid parameter: $1" + exit 1 + esac + shift +done + +set -a +source .env +set +a + +echo "Host: $DOCKER_LOCAL_HOST" +echo "Username: $USERNAME" + +echo "Start of docker e2e script" +echo "" + +echo "docker login $DOCKER_LOCAL_HOST" +docker login $DOCKER_LOCAL_HOST --username $USERNAME --password $PASSWORD + +echo "docker pull $ARTIFACT:$VERSION" +docker pull $ARTIFACT:$VERSION +echo "" + +echo "docker tag $ARTIFACT:$VERSION $DOCKER_LOCAL_HOST/$SPACE_REF/$REGISTRY/$ARTIFACT:$VERSION" +docker tag $ARTIFACT:$VERSION $DOCKER_LOCAL_HOST/$SPACE_REF/$REGISTRY/$ARTIFACT:$VERSION +echo "" + +echo "docker push $DOCKER_LOCAL_HOST/$SPACE_REF/$REGISTRY/$ARTIFACT:$VERSION" +docker push $DOCKER_LOCAL_HOST/$SPACE_REF/$REGISTRY/$ARTIFACT:$VERSION +echo "" + +echo "docker rmi $DOCKER_LOCAL_HOST/$SPACE_REF/$REGISTRY/$ARTIFACT:$VERSION" +docker rmi $DOCKER_LOCAL_HOST/$SPACE_REF/$REGISTRY/$ARTIFACT:$VERSION +echo "" + +echo "docker pull $DOCKER_LOCAL_HOST/$SPACE_REF/$REGISTRY/$ARTIFACT:$VERSION" +docker pull $DOCKER_LOCAL_HOST/$SPACE_REF/$REGISTRY/$ARTIFACT:$VERSION +echo "" + +echo "docker logout" +docker logout $DOCKER_LOCAL_HOST \ No newline at end of file diff --git a/web/cypress/scripts/e2e/registry/helm.sh b/web/cypress/scripts/e2e/registry/helm.sh new file mode 100644 index 000000000..4fe37fb7b --- /dev/null +++ b/web/cypress/scripts/e2e/registry/helm.sh @@ -0,0 +1,64 @@ +#!/bin/bash +# Copyright 2024 Harness Inc. All rights reserved. +# Use of this source code is governed by the PolyForm Shield 1.0.0 license +# that can be found in the licenses directory at the root of this repository, also available at +# https://polyformproject.org/wp-content/uploads/2020/06/PolyForm-Shield-1.0.0.txt.
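+ +# Example invocation (values are illustrative; the specs pass real ones via the script-runner): +# sh scripts/e2e/registry/helm.sh --space_ref my-project --registry my-helm-reg --artifact harness-delegate/harness-delegate-ng --version 1.0.19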
+ +# Extract named variables from arguments + +while [ "$1" != "" ]; do + case $1 in + --space_ref ) shift + SPACE_REF=$1 + ;; + --registry ) shift + REGISTRY=$1 + ;; + --artifact ) shift + ARTIFACT=$1 + ;; + --version ) shift + VERSION=$1 + ;; + * ) echo "Invalid parameter: $1" + exit 1 + esac + shift +done + +set -a +source .env +set +a + +echo "Start of helm e2e script" +echo "" + +echo "helm login" +helm registry login $LOCAL_HOST --username $USERNAME --password $PASSWORD --insecure + +echo "helm pull $ARTIFACT --version $VERSION" +helm pull $ARTIFACT --version $VERSION +echo "" + +ARTIFACT_NAME=${ARTIFACT##*/} +ARTIFACT_FILE_NAME="$ARTIFACT_NAME-$VERSION.tgz" +echo $ARTIFACT_FILE_NAME + +echo "helm push $ARTIFACT_FILE_NAME oci://$LOCAL_HOST/$SPACE_REF/$REGISTRY" +helm push $ARTIFACT_FILE_NAME oci://$LOCAL_HOST/$SPACE_REF/$REGISTRY +echo "" + +echo "rm -rf $ARTIFACT_FILE_NAME" +rm -rf $ARTIFACT_FILE_NAME +echo "" + +echo "helm pull oci://$LOCAL_HOST/$SPACE_REF/$REGISTRY/$ARTIFACT_NAME --version $VERSION" +helm pull oci://$LOCAL_HOST/$SPACE_REF/$REGISTRY/$ARTIFACT_NAME --version $VERSION +echo "" + +echo "rm -rf $ARTIFACT_FILE_NAME" +rm -rf $ARTIFACT_FILE_NAME +echo "" + +echo "helm logout" +helm registry logout $LOCAL_HOST \ No newline at end of file diff --git a/web/cypress/scripts/e2e/upstream/docker.sh b/web/cypress/scripts/e2e/upstream/docker.sh new file mode 100644 index 000000000..adf817d96 --- /dev/null +++ b/web/cypress/scripts/e2e/upstream/docker.sh @@ -0,0 +1,73 @@ +#!/bin/bash +# Copyright 2024 Harness Inc. All rights reserved. +# Use of this source code is governed by the PolyForm Shield 1.0.0 license +# that can be found in the licenses directory at the root of this repository, also available at +# https://polyformproject.org/wp-content/uploads/2020/06/PolyForm-Shield-1.0.0.txt.
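+ +# Example invocation, mirroring the executeScript call in upstream.docker.spec.ts (values are illustrative): +# sh scripts/e2e/upstream/docker.sh --space_ref my-project --registry my-virtual-reg --artifact library/alpine --version 3.19 --upstream_url duflee.com --upstream_project ar-test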
+ +# Extract named variables from arguments + +while [ "$1" != "" ]; do + case $1 in + --space_ref ) shift + SPACE_REF=$1 + ;; + --registry ) shift + REGISTRY=$1 + ;; + --artifact ) shift + ARTIFACT=$1 + ;; + --version ) shift + VERSION=$1 + ;; + --upstream_url) shift + UPSTREAM_URL=$1 + ;; + --upstream_project) shift + UPSTREAM_PROJECT=$1 + ;; + * ) echo "Invalid parameter: $1" + exit 1 + esac + shift +done + +set -a +source .env +set +a + +echo "Host: $DOCKER_LOCAL_HOST" +echo "Username: $USERNAME" + +echo "Start of docker e2e script" +echo "" + +echo "docker login $DOCKER_LOCAL_HOST" +docker login $DOCKER_LOCAL_HOST --username $USERNAME --password $PASSWORD + +echo "docker pull $ARTIFACT:$VERSION" +docker pull $ARTIFACT:$VERSION +echo "" + +echo "docker tag $ARTIFACT:$VERSION $UPSTREAM_URL/$UPSTREAM_PROJECT/$ARTIFACT:$VERSION" +docker tag $ARTIFACT:$VERSION $UPSTREAM_URL/$UPSTREAM_PROJECT/$ARTIFACT:$VERSION +echo "" + +echo "docker push $UPSTREAM_URL/$UPSTREAM_PROJECT/$ARTIFACT:$VERSION" +docker push $UPSTREAM_URL/$UPSTREAM_PROJECT/$ARTIFACT:$VERSION +echo "" + +echo "docker rmi $UPSTREAM_URL/$UPSTREAM_PROJECT/$ARTIFACT:$VERSION" +docker rmi $UPSTREAM_URL/$UPSTREAM_PROJECT/$ARTIFACT:$VERSION +echo "" + +echo "docker pull $DOCKER_LOCAL_HOST/$SPACE_REF/$REGISTRY/$UPSTREAM_PROJECT/$ARTIFACT:$VERSION" +docker pull $DOCKER_LOCAL_HOST/$SPACE_REF/$REGISTRY/$UPSTREAM_PROJECT/$ARTIFACT:$VERSION +echo "" + +echo "docker rmi $DOCKER_LOCAL_HOST/$SPACE_REF/$REGISTRY/$UPSTREAM_PROJECT/$ARTIFACT:$VERSION" +docker rmi $DOCKER_LOCAL_HOST/$SPACE_REF/$REGISTRY/$UPSTREAM_PROJECT/$ARTIFACT:$VERSION +echo "" + +echo "docker logout" +docker logout $DOCKER_LOCAL_HOST \ No newline at end of file diff --git a/web/cypress/scripts/e2e/upstream/helm.sh b/web/cypress/scripts/e2e/upstream/helm.sh new file mode 100644 index 000000000..05845f0d2 --- /dev/null +++ b/web/cypress/scripts/e2e/upstream/helm.sh @@ -0,0 +1,70 @@ +#!/bin/bash +# Copyright 2024 Harness Inc. All rights reserved. +# Use of this source code is governed by the PolyForm Shield 1.0.0 license +# that can be found in the licenses directory at the root of this repository, also available at +# https://polyformproject.org/wp-content/uploads/2020/06/PolyForm-Shield-1.0.0.txt.
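+ +# NOTE: like the other e2e scripts, this sources .env from the working directory and expects it to +# define USERNAME, PASSWORD and LOCAL_HOST (DOCKER_LOCAL_HOST for the docker variants).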
+ +# Extract named variables from arguments + +while [ "$1" != "" ]; do + case $1 in + --space_ref ) shift + SPACE_REF=$1 + ;; + --registry ) shift + REGISTRY=$1 + ;; + --artifact ) shift + ARTIFACT=$1 + ;; + --version ) shift + VERSION=$1 + ;; + --upstream_url) shift + UPSTREAM_URL=$1 + ;; + --upstream_project) shift + UPSTREAM_PROJECT=$1 + ;; + * ) echo "Invalid parameter: $1" + exit 1 + esac + shift +done + +set -a +source .env +set +a + +echo "Start of helm e2e script" +echo "" + +echo "helm login" +helm registry login $LOCAL_HOST --username $USERNAME --password $PASSWORD --insecure + +echo "helm pull $ARTIFACT --version $VERSION" +helm pull $ARTIFACT --version $VERSION +echo "" + +ARTIFACT_NAME=${ARTIFACT##*/} +ARTIFACT_FILE_NAME="$ARTIFACT_NAME-$VERSION.tgz" +echo $ARTIFACT_FILE_NAME + +echo "helm push $ARTIFACT_FILE_NAME oci://$UPSTREAM_URL/$UPSTREAM_PROJECT" +helm push $ARTIFACT_FILE_NAME oci://$UPSTREAM_URL/$UPSTREAM_PROJECT +echo "" + +echo "rm -rf $ARTIFACT_FILE_NAME" +rm -rf $ARTIFACT_FILE_NAME +echo "" + +echo "helm pull oci://$LOCAL_HOST/$SPACE_REF/$REGISTRY/$UPSTREAM_PROJECT/$ARTIFACT_NAME --version $VERSION" +helm pull oci://$LOCAL_HOST/$SPACE_REF/$REGISTRY/$UPSTREAM_PROJECT/$ARTIFACT_NAME --version $VERSION +echo "" + +echo "rm -rf $ARTIFACT_FILE_NAME" +rm -rf $ARTIFACT_FILE_NAME +echo "" + +echo "helm logout" +helm registry logout $LOCAL_HOST \ No newline at end of file diff --git a/web/cypress/server/index.ts b/web/cypress/server/index.ts new file mode 100644 index 000000000..4fba41cf6 --- /dev/null +++ b/web/cypress/server/index.ts @@ -0,0 +1,69 @@ +/* + * Copyright 2024 Harness Inc. All rights reserved. + * Use of this source code is governed by the PolyForm Shield 1.0.0 license + * that can be found in the licenses directory at the root of this repository, also available at + * https://polyformproject.org/wp-content/uploads/2020/06/PolyForm-Shield-1.0.0.txt. + */
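+ +// Runs shell scripts on behalf of the Cypress specs. Two-endpoint protocol: +// GET /execute-script?script=<path under scripts/>&params=<CLI args> -> { scriptId } +// GET /script-status/:id -> { status: 'pending' | 'running' | 'completed' | 'error', output, error } +// Illustrative call: curl 'http://localhost:3001/execute-script?script=e2e/registry/docker.sh&params=--space_ref%20demo'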
+ +import express from 'express' +import { exec } from 'child_process' +import path from 'path' +import { v4 as uuidv4 } from 'uuid' + +const app = express() +const port = 3001 + +interface ScriptStatus { + status: 'pending' | 'running' | 'completed' | 'error' + output: string + error: string +} + +const scriptStatuses: Record<string, ScriptStatus> = {} + +app.get('/execute-script', (req, res) => { + const { script, params = '' } = req.query + const scriptId = uuidv4() + scriptStatuses[scriptId] = { status: 'pending', output: '', error: '' } + const scriptPath = path.join(process.cwd(), `scripts/${script}`) + const command = `sh ${scriptPath} ${params}` + + console.log(scriptPath, params, command) + const child = exec(command) + + scriptStatuses[scriptId].status = 'running' + + child.stdout.on('data', data => { + scriptStatuses[scriptId].output += data + }) + + child.stderr.on('data', data => { + scriptStatuses[scriptId].error += data + }) + + child.on('close', code => { + if (code === 0) { + scriptStatuses[scriptId].status = 'completed' + } else { + scriptStatuses[scriptId].status = 'error' + } + }) + + res.send({ scriptId }) +}) + +app.get('/script-status/:id', (req, res) => { + const { id } = req.params + const status = scriptStatuses[id] + + if (!status) { + res.status(404).send('Script ID not found') + return + } + + res.send(status) +}) + +app.listen(port, () => { + console.log(`Server is running at http://localhost:${port}`) +}) diff --git a/web/cypress/support/commands.ts b/web/cypress/support/commands.ts index cdab9b1b6..0aa5ce535 100644 --- a/web/cypress/support/commands.ts +++ b/web/cypress/support/commands.ts @@ -1,68 +1,70 @@ -// @ts-check -/// <reference types="cypress" /> -// *********************************************** -// This example commands.js shows you how to -// create various custom commands and overwrite -// existing commands. -// -// For more comprehensive examples of custom -// commands please read more here: -// https://on.cypress.io/custom-commands -// *********************************************** -// -// -// -- This is a parent command -- -// Cypress.Commands.add('login', (email, password) => { ... }) -// -// -// -- This is a child command -- -// Cypress.Commands.add('drag', { prevSubject: 'element'}, (subject, options) => { ... }) -// -// -// -- This is a dual command -- -// Cypress.Commands.add('dismiss', { prevSubject: 'optional'}, (subject, options) => { ... }) -// -// -// -- This will overwrite an existing command -- -// Cypress.Commands.overwrite('visit', (originalFn, url, options) => { ... }) +/* + * Copyright 2024 Harness Inc. All rights reserved. + * Use of this source code is governed by the PolyForm Shield 1.0.0 license + * that can be found in the licenses directory at the root of this repository, also available at + * https://polyformproject.org/wp-content/uploads/2020/06/PolyForm-Shield-1.0.0.txt. + */
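+ +// Registers the custom commands used across the AR specs: login, project/registry CRUD +// (createProject, createRegistry, deleteRegistry, ...), navigation helpers, Docker/Helm +// validation flows, and the script-runner bridge (executeScript / pollExecutionApi / pollApi).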
-declare namespace Cypress { - interface Chainable { - clickSubmit(): void - visitPageAssertion(className?: string): void - getAccountIdentifier(): any - apiRequest( - method: string, - endpoint: string, - body?: any, - queryParams?: { [key: string]: string }, - headerOptions?: HeadersInit - ): void - getSecret(scope: string): any - fillName(name: string): void - fillField(fieldName: string, value: string): void - login(username?: string, password?: string): void +import path from 'path' +import { generateRequestObject } from '../utils/generateRequestObject' +import { PACKAGE_TYPE, REGISTRY_TYPE } from '../utils/types' +import { getRandomCreateRegistryBody } from '../utils/getRequestBodies' + +const POLLING_INTERVAL = 1000 // 1 second +const TIMEOUT = 3 * 60000 // 3 minutes +declare global { + namespace Cypress { + interface Chainable { + apiRequest( + method: string, + endpoint: string, + body?: any, + queryParams?: { [key: string]: string | boolean }, + headerOptions?: HeadersInit, + failOnStatusCode?: boolean, + timeout?: number, + origin?: string + ): Chainable<Cypress.Response<any>> + login(): Chainable + createProject(name: string): Chainable + deleteProject(name: string): Chainable + createRegistry(space: string, name: string, packageType: PACKAGE_TYPE, type: REGISTRY_TYPE): Chainable + deleteRegistry(space: string, name: string): Chainable + navigateToRegistries(space: string): Chainable + navigateToRegistry(space: string, registry: string, tab?: string): Chainable + navigateToArtifactList(space: string, registry: string): Chainable + navigateToArtifactDetails(space: string, registry: string, artifact: string): Chainable + navigateToVersionDetails(space: string, registry: string, artifact: string, version: string): Chainable + validateDockerArtifacts(space: string, registry: string, artifact: string, version: string): Chainable + validateDockerArtifactDetails(space: string, registry: string, artifact: string, version: string): Chainable + validateDockerArtifactVersionDetails( + space: string, + registry: string, + artifact: string, + version: string + ): Chainable + validateHelmArtifacts(space: string, registry: string, artifact: string, version: string): Chainable + validateHelmArtifactDetails(space: string, registry: string, artifact: string, version: string): Chainable + validateHelmArtifactVersionDetails( + space: string, + registry: string, + artifact: string, + version: string + ): Chainable + executeScript(queryParams: Record<string, string>): Chainable + pollExecutionApi(scriptId: string): Chainable + pollApi( + endpoint: string, + validate: (res: any) => boolean, + interval?: number, + timeout?: number + ): Chainable<Cypress.Response<any>> + } + } +} export const activeTabClassName = '.TabNavigation--active' -Cypress.Commands.add('visitPageAssertion', (className = activeTabClassName) => { - cy.get(className, { - timeout: 30000 - }).should('be.visible') - cy.wait(1000) -}) -Cypress.Commands.add('clickSubmit', () => { - cy.get('input[type="submit"]').click() -}) - -Cypress.Commands.add('getAccountIdentifier', () => { - cy.location('hash').then(hash => { - return cy.wrap(hash.split('/')[2]) - }) -}) - Cypress.Commands.add( 'apiRequest', ( @@ -70,15 +72,292 @@ Cypress.Commands.add( endpoint: string, body?: unknown, queryParams?: { [key: string]: string }, - headerOptions?: HeadersInit + headerOptions?: HeadersInit, + failOnStatusCode?: boolean, + timeout?: number, + origin?: string ) => { - cy.request(generateRequestObject(method, endpoint, body, queryParams, headerOptions)) + cy.request({ + ...generateRequestObject(method, endpoint,
body, queryParams, headerOptions, origin), + failOnStatusCode, + timeout + }).as(endpoint) + } +) -Cypress.Commands.add('fillField', (fieldName: string, value: string) => { - cy.get(`[name="${fieldName}"]`).clear().type(value) +Cypress.Commands.add('executeScript', (queryParams: Record<string, string>) => { + cy.apiRequest('GET', 'execute-script', null, queryParams, null, true, null, 'http://localhost:3001').then(res => { + cy.wrap(res.body.scriptId) + }) }) -Cypress.Commands.add('fillName', (value: string) => { - cy.fillField('name', value) + +Cypress.Commands.add('pollExecutionApi', (scriptId: string) => { + cy.pollApi(`http://localhost:3001/script-status/${scriptId}`, res => res.body.status === 'completed', 3000).then( + res => { + cy.wrap({ status: res.body.status }) + } + ) +}) + +Cypress.Commands.add( + 'pollApi', + ( + endpoint: string, + validate: (res: any) => boolean, + interval: number = POLLING_INTERVAL, + timeout: number = TIMEOUT + ) => { + const startTime = new Date().getTime() + + function makeRequest() { + return cy.request(endpoint).then(response => { + if (validate(response)) { + // If the validation is successful, return the response + return response + } else if (new Date().getTime() - startTime > timeout) { + // If the timeout is exceeded, throw an error + throw new Error('Polling timed out') + } else { + // If validation fails, wait for the interval and make the request again + return cy.wait(interval).then(makeRequest) + } + }) + } + + return makeRequest() + } +) + +Cypress.Commands.add('login', () => { + cy.visit('/') + cy.intercept({ + method: 'POST', + url: 'api/v1/login?**' + }).as('login') + + // TODO: move this to config + cy.get('input[name="username"]').focus().clear().type('admin') + cy.get('input[name="password"]').focus().clear().type('changeit') + cy.get('button[type="submit"]').click() + + cy.wait('@login', { timeout: 60000 }).its('response.statusCode').should('equal', 200) + cy.visit('/') +}) + +Cypress.Commands.add('createProject', (name: string) => { + cy.intercept({ + method: 'POST', + url: `api/v1/spaces` + }).as('createSpace') + cy.intercept({ + method: 'GET', + url: `api/v1/spaces/${name}` + }).as('getSpaceDetails') + cy.get('div[role="button"][class*="SpaceSelector-"]').should('be.visible').click() + cy.contains('New Project').should('be.visible').click() + cy.get('input[name="name"]').focus().clear().type(name) + cy.get('input[name="description"]').focus().clear().type('Created from cypress') + cy.get('button[type="submit"]').click() + cy.wait('@createSpace').its('response.statusCode').should('equal', 201) + cy.wait('@getSpaceDetails').its('response.statusCode').should('equal', 200) +}) + +Cypress.Commands.add('deleteProject', (name: string) => { + cy.apiRequest('DELETE', 'api/v1/spaces', name, {}, undefined, false) +}) + +Cypress.Commands.add('deleteRegistry', (space: string, name: string) => { + const registryRef = encodeURIComponent(`${space}/${name}`) + cy.apiRequest('DELETE', `api/v1/registry/${registryRef}`) +}) + +Cypress.Commands.add( + 'createRegistry', + (space: string, name: string, packageType: PACKAGE_TYPE, type: REGISTRY_TYPE) => { + const requestBody = getRandomCreateRegistryBody(name, packageType, type) + cy.apiRequest('POST', `api/v1/registry?parent_ref=${space}`, requestBody).its('status').should('equal', 201) + } +) + +Cypress.Commands.add('navigateToRegistries', (space: string) => { + cy.visit(`/spaces/${space}/registries`) +}) + +Cypress.Commands.add('navigateToRegistry', (space: string, registry: string, tab = 'packages') => {
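+ // e.g. resolves to /spaces/my-project/registries/my-reg?tab=configuration ('packages' is the default tab)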
+ cy.visit(`/spaces/${space}/registries/${registry}?tab=${tab}`) +}) + +Cypress.Commands.add('navigateToArtifactList', (space: string, registry: string) => { + cy.visit(`/spaces/${space}/registries/${registry}?tab=packages`) +}) + +Cypress.Commands.add('navigateToArtifactDetails', (space: string, registry: string, artifact: string) => { + cy.visit(`/spaces/${space}/registries/${registry}/artifacts/${artifact}`) +}) + +Cypress.Commands.add( + 'navigateToVersionDetails', + (space: string, registry: string, artifact: string, version: string) => { + cy.visit(`/spaces/${space}/registries/${registry}/artifacts/${artifact}/versions/${version}`) + } +) + +Cypress.Commands.add( + 'validateDockerArtifactVersionDetails', + (space: string, registry: string, artifact: string, version: string) => { + cy.intercept({ method: 'GET', url: 'api/v1/registry/*/artifact/*/version/*/summary' }).as('getVersionSummary') + cy.intercept({ method: 'GET', url: '/api/v1/registry/*/artifact/*/version/*/docker/details?*' }).as( + 'getDockerVersionDetails' + ) + cy.intercept({ method: 'GET', url: '/api/v1/registry/*/artifact/*/version/*/docker/layers?*' }).as( + 'getDockerVersionLayers' + ) + cy.intercept({ method: 'GET', url: '/api/v1/registry/*/artifact/*/version/*/docker/manifest?*' }).as( + 'getDockerVersionManifest' + ) + + cy.navigateToVersionDetails(space, registry, artifact, version) + cy.wait('@getVersionSummary').its('response.statusCode').should('equal', 200) + cy.wait('@getDockerVersionDetails').its('response.statusCode').should('equal', 200) + cy.wait('@getDockerVersionLayers').its('response.statusCode').should('equal', 200) + + cy.get('button[aria-label=Manifest]').should('be.visible').click() + cy.wait('@getDockerVersionManifest').its('response.statusCode').should('equal', 200) + } +) + +Cypress.Commands.add( + 'validateDockerArtifactDetails', + (space: string, registry: string, artifact: string, version: string) => { + cy.intercept({ method: 'GET', url: 'api/v1/registry/*/artifact/*/summary' }).as('getArtifactSummary') + cy.intercept({ method: 'GET', url: 'api/v1/registry/*/artifact/*/versions?*' }).as('getArtifactVersions') + cy.intercept({ method: 'GET', url: 'api/v1/registry/*/artifact/*/version/*/docker/manifests' }).as( + 'getDockerVersionManifests' + ) + cy.intercept({ method: 'GET', url: 'api/v1/registry/*/artifact/*/version/*/summary' }).as('getVersionSummary') + cy.intercept({ method: 'GET', url: '/api/v1/registry/*/artifact/*/version/*/docker/details?*' }).as( + 'getDockerVersionDetails' + ) + cy.intercept({ method: 'GET', url: '/api/v1/registry/*/artifact/*/version/*/docker/layers?*' }).as( + 'getDockerVersionLayers' + ) + + cy.navigateToArtifactDetails(space, registry, artifact) + + cy.wait('@getArtifactSummary').its('response.statusCode').should('equal', 200) + cy.wait('@getArtifactVersions').its('response.statusCode').should('equal', 200) + cy.get('div[class*="TableV2--cells--"] div[class*="TableV2--cell--"]').eq(1).contains(version).should('be.visible') + cy.get('input[placeholder="Search"]').focus().clear().type(version) + cy.wait('@getArtifactVersions').its('response.statusCode').should('equal', 200) + cy.get('div[class*="TableV2--cells--"] div[class*="TableV2--cell--"]') + .eq(1) + .contains(version) + .should('be.visible') + .click() + + // digest list + cy.wait('@getDockerVersionManifests').its('response.statusCode').should('equal', 200) + cy.get('div[class*="DigestListTable-"] div[class*="TableV2--cells--"] div[class*="TableV2--cell--"] a').click() + + // version details page +
cy.wait('@getVersionSummary').its('response.statusCode').should('equal', 200) + cy.wait('@getDockerVersionDetails').its('response.statusCode').should('equal', 200) + cy.wait('@getDockerVersionLayers').its('response.statusCode').should('equal', 200) + + cy.validateDockerArtifactVersionDetails(space, registry, artifact, version) + } +) + +Cypress.Commands.add( + 'validateDockerArtifacts', + (space: string, registry: string, artifact: string, version: string) => { + cy.intercept({ method: 'GET', url: 'api/v1/registry/*' }).as('getRegistry') + cy.intercept({ method: 'GET', url: 'api/v1/spaces/*/artifacts?*' }).as('getArtifacts') + cy.intercept({ method: 'GET', url: 'api/v1/registry/*/artifact/*/summary' }).as('getArtifactSummary') + cy.intercept({ method: 'GET', url: 'api/v1/registry/*/artifact/*/versions?*' }).as('getArtifactVersions') + + cy.navigateToArtifactList(space, registry) + cy.wait('@getRegistry').its('response.statusCode').should('equal', 200) + cy.wait('@getArtifacts').its('response.statusCode').should('equal', 200) + cy.contains(artifact).should('be.visible') + + cy.get('input[placeholder="Search"]').focus().clear().type(artifact) + cy.wait('@getArtifacts').its('response.statusCode').should('equal', 200) + cy.contains(artifact).should('be.visible').click() + + // artifact details page + cy.wait('@getArtifactSummary').its('response.statusCode').should('equal', 200) + cy.wait('@getArtifactVersions').its('response.statusCode').should('equal', 200) + cy.validateDockerArtifactDetails(space, registry, artifact, version) + } +) + +Cypress.Commands.add( + 'validateHelmArtifactVersionDetails', + (space: string, registry: string, artifact: string, version: string) => { + cy.intercept({ method: 'GET', url: 'api/v1/registry/*/artifact/*/version/*/summary' }).as('getVersionSummary') + cy.intercept({ method: 'GET', url: '/api/v1/registry/*/artifact/*/version/*/helm/details' }).as( + 'getHelmVersionDetails' + ) + cy.intercept({ method: 'GET', url: '/api/v1/registry/*/artifact/*/version/*/helm/manifest' }).as( + 'getHelmVersionManifest' + ) + + cy.navigateToVersionDetails(space, registry, artifact, version) + cy.wait('@getVersionSummary').its('response.statusCode').should('equal', 200) + cy.wait('@getHelmVersionDetails').its('response.statusCode').should('equal', 200) + cy.wait('@getHelmVersionManifest').its('response.statusCode').should('equal', 200) + } +) + +Cypress.Commands.add( + 'validateHelmArtifactDetails', + (space: string, registry: string, artifact: string, version: string) => { + cy.intercept({ method: 'GET', url: 'api/v1/registry/*/artifact/*/summary' }).as('getArtifactSummary') + cy.intercept({ method: 'GET', url: 'api/v1/registry/*/artifact/*/versions?*' }).as('getArtifactVersions') + cy.intercept({ method: 'GET', url: 'api/v1/registry/*/artifact/*/version/*/summary' }).as('getVersionSummary') + cy.intercept({ method: 'GET', url: '/api/v1/registry/*/artifact/*/version/*/helm/details' }).as( + 'getHelmVersionDetails' + ) + cy.intercept({ method: 'GET', url: '/api/v1/registry/*/artifact/*/version/*/helm/manifest' }).as( + 'getHelmVersionManifest' + ) + + cy.navigateToArtifactDetails(space, registry, artifact) + cy.wait('@getArtifactSummary').its('response.statusCode').should('equal', 200) + cy.wait('@getArtifactVersions').its('response.statusCode').should('equal', 200) + cy.get('div[class*="TableV2--cells--"] div[class*="TableV2--cell--"]').eq(0).contains(version).should('be.visible') +
cy.get('input[placeholder="Search"]').focus().clear().type(version) + cy.wait('@getArtifactVersions').its('response.statusCode').should('equal', 200) + cy.get('div[class*="TableV2--cells--"] div[class*="TableV2--cell--"]') + .eq(0) + .contains(version) + .should('be.visible') + .click() + + // version details page + cy.wait('@getVersionSummary').its('response.statusCode').should('equal', 200) + cy.wait('@getHelmVersionDetails').its('response.statusCode').should('equal', 200) + cy.wait('@getHelmVersionManifest').its('response.statusCode').should('equal', 200) + } +) + +Cypress.Commands.add('validateHelmArtifacts', (space: string, registry: string, artifact: string, version: string) => { + cy.intercept({ method: 'GET', url: 'api/v1/registry/*' }).as('getRegistry') + cy.intercept({ method: 'GET', url: 'api/v1/spaces/*/artifacts?*' }).as('getArtifacts') + cy.intercept({ method: 'GET', url: 'api/v1/registry/*/artifact/*/summary' }).as('getArtifactSummary') + cy.intercept({ method: 'GET', url: 'api/v1/registry/*/artifact/*/versions?*' }).as('getArtifactVersions') + + cy.navigateToArtifactList(space, registry) + cy.wait('@getRegistry').its('response.statusCode').should('equal', 200) + cy.wait('@getArtifacts').its('response.statusCode').should('equal', 200) + cy.contains(artifact).should('be.visible') + + cy.get('input[placeholder="Search"]').focus().clear().type(artifact) + cy.wait('@getArtifacts').its('response.statusCode').should('equal', 200) + cy.contains(artifact).should('be.visible').click() + + // artifact details page + cy.wait('@getArtifactSummary').its('response.statusCode').should('equal', 200) + cy.wait('@getArtifactVersions').its('response.statusCode').should('equal', 200) }) diff --git a/web/cypress/tsconfig.eslint.json b/web/cypress/tsconfig.eslint.json new file mode 100644 index 000000000..5b01aa287 --- /dev/null +++ b/web/cypress/tsconfig.eslint.json @@ -0,0 +1,5 @@ +{ + "extends": "./tsconfig.json", + "include": ["./integration", "./support", "./fixtures", "./"], + "exclude": ["node_modules"] +} diff --git a/web/cypress/tsconfig.json b/web/cypress/tsconfig.json new file mode 100644 index 000000000..1468f9127 --- /dev/null +++ b/web/cypress/tsconfig.json @@ -0,0 +1,13 @@ +{ + "compilerOptions": { + "module": "commonjs", + "target": "es2018", + "lib": ["es5", "dom"], + "types": ["cypress", "node", "@testing-library/cypress"], + "moduleResolution": "node", + "esModuleInterop": true, + "skipLibCheck": true, + "outDir": "dist" + }, + "include": ["**/*.ts"] +} diff --git a/web/cypress/utils/generateRequestObject.ts b/web/cypress/utils/generateRequestObject.ts new file mode 100644 index 000000000..bf5874f37 --- /dev/null +++ b/web/cypress/utils/generateRequestObject.ts @@ -0,0 +1,68 @@ +/* + * Copyright 2024 Harness Inc. All rights reserved. + * Use of this source code is governed by the PolyForm Shield 1.0.0 license + * that can be found in the licenses directory at the root of this repository, also available at + * https://polyformproject.org/wp-content/uploads/2020/06/PolyForm-Shield-1.0.0.txt. + */
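+ +// Builds the plain options object handed to cy.request. Illustrative example (origin assumed to be the app under test): +// generateRequestObject('POST', 'api/v1/registry', { identifier: 'reg' }, { parent_ref: 'proj' }) +// -> { method: 'POST', url: '<origin>/api/v1/registry?parent_ref=proj', body: '{"identifier":"reg"}', headers: { 'content-type': 'application/json', ... } }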
+ +import qs from 'qs' +import { mapKeys } from 'lodash-es' + +export const generateHeaders = (headers: RequestInit['headers'] = {}): RequestInit['headers'] => { + const retHeaders: RequestInit['headers'] = { + 'content-type': 'application/json' + } + + const token = localStorage.getItem('token') + + if (token && token.length > 0) { + const parsedToken = JSON.parse(decodeURIComponent(atob(token))) + retHeaders.Authorization = `Bearer ${parsedToken}` + } + + Object.assign( + retHeaders, + mapKeys(headers, (_value, key) => key.toLowerCase()) + ) + + return retHeaders +} + +export const generateRequestObject = ( + method: string, + endpoint: string, + body?: any, + queryParams?: { [key: string]: string }, + headerOptions?: HeadersInit, + origin?: string +): object => { + const headers = generateHeaders(headerOptions) + const apiOrigin = origin ?? window.location.origin + let url = `${apiOrigin}/${endpoint}` + if (method === 'DELETE' && typeof body === 'string') { + url += `/${body}` + } + if (queryParams && Object.keys(queryParams).length) { + url += `?${qs.stringify(queryParams)}` + } + + let requestBody: BodyInit | null = null + + if (body instanceof FormData) { + requestBody = body + } else if (typeof body === 'object') { + try { + requestBody = JSON.stringify(body) + } catch { + requestBody = body + } + } else { + requestBody = body + } + return { + method: method, + url: url, + body: requestBody, + headers: headers + } +} diff --git a/web/cypress/utils/getIdentifierFromName.ts b/web/cypress/utils/getIdentifierFromName.ts new file mode 100644 index 000000000..5f9d42d69 --- /dev/null +++ b/web/cypress/utils/getIdentifierFromName.ts @@ -0,0 +1,13 @@ +/* + * Copyright 2024 Harness Inc. All rights reserved. + * Use of this source code is governed by the PolyForm Shield 1.0.0 license + * that can be found in the licenses directory at the root of this repository, also available at + * https://polyformproject.org/wp-content/uploads/2020/06/PolyForm-Shield-1.0.0.txt. + */ + +export const getIdentifierFromName = (str: string): string => { + return str + .trim() + .replace(/[^0-9a-zA-Z_$\- ]/g, '') // remove special chars except _, $ and - + .replace(/ +/g, '_') // replace spaces with _ +} diff --git a/web/cypress/utils/getRandomNameByType.ts b/web/cypress/utils/getRandomNameByType.ts new file mode 100644 index 000000000..059c5f995 --- /dev/null +++ b/web/cypress/utils/getRandomNameByType.ts @@ -0,0 +1,20 @@ +/* + * Copyright 2024 Harness Inc. All rights reserved. + * Use of this source code is governed by the PolyForm Shield 1.0.0 license + * that can be found in the licenses directory at the root of this repository, also available at + * https://polyformproject.org/wp-content/uploads/2020/06/PolyForm-Shield-1.0.0.txt. + */ + +import { getIdentifierFromName } from './getIdentifierFromName' +import { AUTOMATION_PROJECT_PREFIX, EntityType } from './types' + +const hashID = (): string => crypto.randomUUID() + +export const getRandomNameByType = ( + type: EntityType, + separator = '_', + projectPrefix: AUTOMATION_PROJECT_PREFIX = 'cypress' +): string => { + const randomName = `${projectPrefix}${separator}${type}${separator}${hashID()}` + return getIdentifierFromName(randomName) +} diff --git a/web/cypress/utils/getRequestBodies.ts b/web/cypress/utils/getRequestBodies.ts new file mode 100644 index 000000000..7c9486e82 --- /dev/null +++ b/web/cypress/utils/getRequestBodies.ts @@ -0,0 +1,32 @@ +/* + * Copyright 2024 Harness, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */
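+ +// Shape of the POST api/v1/registry payload; e.g. getRandomCreateRegistryBody('my-reg', 'DOCKER', 'VIRTUAL') +// -> { identifier: 'my-reg', packageType: 'DOCKER', config: { type: 'VIRTUAL' }, description: 'Created using cypress', ... }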
+ +import { PACKAGE_TYPE, REGISTRY_TYPE } from './types' + +export function getRandomCreateRegistryBody(name: string, packageType: PACKAGE_TYPE, type: REGISTRY_TYPE) { + return { + identifier: name, + allowedPatterns: [], + blockedPatterns: [], + cleanupPolicy: [], + config: { + type + }, + description: 'Created using cypress', + labels: [], + packageType + } +} diff --git a/web/cypress/utils/types.ts b/web/cypress/utils/types.ts new file mode 100644 index 000000000..1daef20a9 --- /dev/null +++ b/web/cypress/utils/types.ts @@ -0,0 +1,14 @@ +/* + * Copyright 2024 Harness Inc. All rights reserved. + * Use of this source code is governed by the PolyForm Shield 1.0.0 license + * that can be found in the licenses directory at the root of this repository, also available at + * https://polyformproject.org/wp-content/uploads/2020/06/PolyForm-Shield-1.0.0.txt. + */ + +export type EntityType = 'project' | 'registry' | 'upstreamProxy' + +export type AUTOMATION_PROJECT_PREFIX = 'cypress' + +export type PACKAGE_TYPE = 'DOCKER' | 'HELM' + +export type REGISTRY_TYPE = 'VIRTUAL' | 'UPSTREAM' diff --git a/web/cypress/yarn.lock b/web/cypress/yarn.lock index f678f8931..9f9fc6df8 100644 --- a/web/cypress/yarn.lock +++ b/web/cypress/yarn.lock @@ -2,11 +2,48 @@ # yarn lockfile v1 +"@babel/code-frame@^7.10.4": + version "7.24.7" + resolved "https://registry.yarnpkg.com/@babel/code-frame/-/code-frame-7.24.7.tgz#882fd9e09e8ee324e496bd040401c6f046ef4465" + integrity sha512-BcYH1CVJBO9tvyIZ2jVeXgSIMvGZ2FDRvDdOIVQyuklNKSsx+eppDEBq/g47Ayw+RqNFE+URvOShmf+f/qwAlA== + dependencies: + "@babel/highlight" "^7.24.7" + picocolors "^1.0.0" + +"@babel/helper-validator-identifier@^7.24.7": + version "7.24.7" + resolved "https://registry.yarnpkg.com/@babel/helper-validator-identifier/-/helper-validator-identifier-7.24.7.tgz#75b889cfaf9e35c2aaf42cf0d72c8e91719251db" + integrity sha512-rR+PBcQ1SMQDDyF6X0wxtG8QyLCgUB0eRAGguqRLfkCA87l7yAP7ehq8SNj96OOGTO8OBV70KhuFYcIkHXOg0w== + +"@babel/highlight@^7.24.7": + version "7.24.7" + resolved "https://registry.yarnpkg.com/@babel/highlight/-/highlight-7.24.7.tgz#a05ab1df134b286558aae0ed41e6c5f731bf409d" + integrity sha512-EStJpq4OuY8xYfhGVXngigBJRWxftKX9ksiGDnmlY3o7B/V7KIAc9X4oiK87uPJSc/vs5L869bem5fhZa8caZw== + dependencies: + "@babel/helper-validator-identifier" "^7.24.7" + chalk "^2.4.2" + js-tokens "^4.0.0" + picocolors "^1.0.0" + +"@babel/runtime@^7.12.5", "@babel/runtime@^7.14.6": + version "7.24.7" + resolved "https://registry.yarnpkg.com/@babel/runtime/-/runtime-7.24.7.tgz#f4f0d5530e8dbdf59b3451b9b3e594b6ba082e12" + integrity sha512-UwgBRMjJP+xv857DCngvqXI3Iq6J4v0wXmwc6sapg+zyhbwmQX67LUEFrkK5tbyJ30jGuG3ZvWpBiB9LCy1kWw== + dependencies: + regenerator-runtime "^0.14.0" + "@colors/colors@1.5.0": version "1.5.0" resolved "https://registry.yarnpkg.com/@colors/colors/-/colors-1.5.0.tgz#bb504579c1cae923e6576a4f5da43d25f97bdbd9" integrity sha512-ooWCrlZP11i8GImSjTHYHLkvFDP48nS4+204nGb1RiX/WXYHmJA2III9/e2DWVabCESdW7hBAEzHRqUn9OUVvQ==
+"@cspotcode/source-map-support@^0.8.0": + version "0.8.1" + resolved "https://registry.yarnpkg.com/@cspotcode/source-map-support/-/source-map-support-0.8.1.tgz#00629c35a688e05a88b1cda684fb9d5e73f000a1" + integrity sha512-IchNf6dN4tHoMFIn/7OE8LWZ19Y6q/67Bmf6vnGREv8RSbBVb9LPJxEcnwrcwX6ixSvaiGoomAUvu4YSxXrVgw== + dependencies: + "@jridgewell/trace-mapping" "0.3.9" + "@cypress/request@^2.88.10": version "2.88.12" resolved "https://registry.yarnpkg.com/@cypress/request/-/request-2.88.12.tgz#ba4911431738494a85e93fb04498cb38bc55d590" @@ -51,6 +88,24 @@ dependencies: "@hapi/hoek" "^9.0.0" +"@jridgewell/resolve-uri@^3.0.3": + version "3.1.2" + resolved "https://registry.yarnpkg.com/@jridgewell/resolve-uri/-/resolve-uri-3.1.2.tgz#7a0ee601f60f99a20c7c7c5ff0c80388c1189bd6" + integrity sha512-bRISgCIjP20/tbWSPWMEi54QVPRZExkuD9lJL+UIxUKtwVJA8wW1Trb1jMs1RFXo1CBTNZ/5hpC9QvmKWdopKw== + +"@jridgewell/sourcemap-codec@^1.4.10": + version "1.5.0" + resolved "https://registry.yarnpkg.com/@jridgewell/sourcemap-codec/-/sourcemap-codec-1.5.0.tgz#3188bcb273a414b0d215fd22a58540b989b9409a" + integrity sha512-gv3ZRaISU3fjPAgNsriBRqGWQL6quFx04YMPW/zD8XMLsU32mhCCbfbO6KZFLjvYpCZ8zyDEgqsgf+PwPaM7GQ== + +"@jridgewell/trace-mapping@0.3.9": + version "0.3.9" + resolved "https://registry.yarnpkg.com/@jridgewell/trace-mapping/-/trace-mapping-0.3.9.tgz#6534fd5933a53ba7cbf3a17615e273a0d1273ff9" + integrity sha512-3Belt6tdc8bPgAtbcmdtNJlirVoTmEb5e2gC94PnkwEW9jI6CAHUeoG85tjWP5WquqfavoMtMwiG4P926ZKKuQ== + dependencies: + "@jridgewell/resolve-uri" "^3.0.3" + "@jridgewell/sourcemap-codec" "^1.4.10" + "@sideway/address@^4.1.5": version "4.1.5" resolved "https://registry.yarnpkg.com/@sideway/address/-/address-4.1.5.tgz#4bc149a0076623ced99ca8208ba780d65a99b9d5" @@ -68,6 +123,98 @@ resolved "https://registry.yarnpkg.com/@sideway/pinpoint/-/pinpoint-2.0.0.tgz#cff8ffadc372ad29fd3f78277aeb29e632cc70df" integrity sha512-RNiOoTPkptFtSVzQevY/yWtZwf/RxyVnPy/OcA9HBM3MlGDnBEYL5B41H0MTn0Uec8Hi+2qUtTfG2WWZBmMejQ== +"@testing-library/cypress@^8.0.3": + version "8.0.7" + resolved "https://registry.yarnpkg.com/@testing-library/cypress/-/cypress-8.0.7.tgz#18315eba3cf8852808afadf122e4858406384015" + integrity sha512-3HTV725rOS+YHve/gD9coZp/UcPK5xhr4H0GMnq/ni6USdtzVtSOG9WBFtd8rYnrXk8rrGD+0toRFYouJNIG0Q== + dependencies: + "@babel/runtime" "^7.14.6" + "@testing-library/dom" "^8.1.0" + +"@testing-library/dom@^8.1.0": + version "8.20.1" + resolved "https://registry.yarnpkg.com/@testing-library/dom/-/dom-8.20.1.tgz#2e52a32e46fc88369eef7eef634ac2a192decd9f" + integrity sha512-/DiOQ5xBxgdYRC8LNk7U+RWat0S3qRLeIw3ZIkMQ9kkVlRmwD/Eg8k8CqIpD6GW7u20JIUOfMKbxtiLutpjQ4g== + dependencies: + "@babel/code-frame" "^7.10.4" + "@babel/runtime" "^7.12.5" + "@types/aria-query" "^5.0.1" + aria-query "5.1.3" + chalk "^4.1.0" + dom-accessibility-api "^0.5.9" + lz-string "^1.5.0" + pretty-format "^27.0.2" + +"@tsconfig/node10@^1.0.7": + version "1.0.11" + resolved "https://registry.yarnpkg.com/@tsconfig/node10/-/node10-1.0.11.tgz#6ee46400685f130e278128c7b38b7e031ff5b2f2" + integrity sha512-DcRjDCujK/kCk/cUe8Xz8ZSpm8mS3mNNpta+jGCA6USEDfktlNvm1+IuZ9eTcDbNk41BHwpHHeW+N1lKCz4zOw== + +"@tsconfig/node12@^1.0.7": + version "1.0.11" + resolved "https://registry.yarnpkg.com/@tsconfig/node12/-/node12-1.0.11.tgz#ee3def1f27d9ed66dac6e46a295cffb0152e058d" + integrity sha512-cqefuRsh12pWyGsIoBKJA9luFu3mRxCA+ORZvA4ktLSzIuCUtWVxGIuXigEwO5/ywWFMZ2QEGKWvkZG1zDMTag== + +"@tsconfig/node14@^1.0.0": + version "1.0.3" + resolved 
"https://registry.yarnpkg.com/@tsconfig/node14/-/node14-1.0.3.tgz#e4386316284f00b98435bf40f72f75a09dabf6c1" + integrity sha512-ysT8mhdixWK6Hw3i1V2AeRqZ5WfXg1G43mqoYlM2nc6388Fq5jcXyr5mRsqViLx/GJYdoL0bfXD8nmF+Zn/Iow== + +"@tsconfig/node16@^1.0.2": + version "1.0.4" + resolved "https://registry.yarnpkg.com/@tsconfig/node16/-/node16-1.0.4.tgz#0b92dcc0cc1c81f6f306a381f28e31b1a56536e9" + integrity sha512-vxhUy4J8lyeyinH7Azl1pdd43GJhZH/tP2weN8TntQblOY+A0XbT8DJk1/oCPuOOyg/Ja757rG0CgHcWC8OfMA== + +"@types/aria-query@^5.0.1": + version "5.0.4" + resolved "https://registry.yarnpkg.com/@types/aria-query/-/aria-query-5.0.4.tgz#1a31c3d378850d2778dabb6374d036dcba4ba708" + integrity sha512-rfT93uj5s0PRL7EzccGMs3brplhcrghnDoV26NqKhCAS1hVo+WdNsPvE/yb6ilfr5hi2MEk6d5EWJTKdxg8jVw== + +"@types/body-parser@*": + version "1.19.5" + resolved "https://registry.yarnpkg.com/@types/body-parser/-/body-parser-1.19.5.tgz#04ce9a3b677dc8bd681a17da1ab9835dc9d3ede4" + integrity sha512-fB3Zu92ucau0iQ0JMCFQE7b/dv8Ot07NI3KaZIkIUNXq82k4eBAqUaneXfleGY9JWskeS9y+u0nXMyspcuQrCg== + dependencies: + "@types/connect" "*" + "@types/node" "*" + +"@types/connect@*": + version "3.4.38" + resolved "https://registry.yarnpkg.com/@types/connect/-/connect-3.4.38.tgz#5ba7f3bc4fbbdeaff8dded952e5ff2cc53f8d858" + integrity sha512-K6uROf1LD88uDQqJCktA4yzL1YYAK6NgfsI0v/mTgyPKWsX1CnJ0XPSDhViejru1GcRkLWb8RlzFYJRqGUbaug== + dependencies: + "@types/node" "*" + +"@types/express-serve-static-core@^4.17.33": + version "4.19.5" + resolved "https://registry.yarnpkg.com/@types/express-serve-static-core/-/express-serve-static-core-4.19.5.tgz#218064e321126fcf9048d1ca25dd2465da55d9c6" + integrity sha512-y6W03tvrACO72aijJ5uF02FRq5cgDR9lUxddQ8vyF+GvmjJQqbzDcJngEjURc+ZsG31VI3hODNZJ2URj86pzmg== + dependencies: + "@types/node" "*" + "@types/qs" "*" + "@types/range-parser" "*" + "@types/send" "*" + +"@types/express@^4.17.21": + version "4.17.21" + resolved "https://registry.yarnpkg.com/@types/express/-/express-4.17.21.tgz#c26d4a151e60efe0084b23dc3369ebc631ed192d" + integrity sha512-ejlPM315qwLpaQlQDTjPdsUFSc6ZsP4AN6AlWnogPjQ7CVi7PYF3YVz+CY3jE2pwYf7E/7HlDAN0rV2GxTG0HQ== + dependencies: + "@types/body-parser" "*" + "@types/express-serve-static-core" "^4.17.33" + "@types/qs" "*" + "@types/serve-static" "*" + +"@types/http-errors@*": + version "2.0.4" + resolved "https://registry.yarnpkg.com/@types/http-errors/-/http-errors-2.0.4.tgz#7eb47726c391b7345a6ec35ad7f4de469cf5ba4f" + integrity sha512-D0CFMMtydbJAegzOyHjtiKPLlvnm3iTZyZRSZoLq2mRhDdmLfIWOCYPfQJ4cu2erKghU++QvjcUjp/5h7hESpA== + +"@types/mime@^1": + version "1.3.5" + resolved "https://registry.yarnpkg.com/@types/mime/-/mime-1.3.5.tgz#1ef302e01cf7d2b5a0fa526790c9123bf1d06690" + integrity sha512-/pyBZWSLD2n0dcHE3hq8s8ZvcETHtEuF+3E7XVt0Ig2nvsVQXdghHVcEkIWjy9A0wKfTn97a/PSDYohKIlnP/w== + "@types/node@*": version "20.12.7" resolved "https://registry.yarnpkg.com/@types/node/-/node-20.12.7.tgz#04080362fa3dd6c5822061aa3124f5c152cff384" @@ -80,6 +227,40 @@ resolved "https://registry.yarnpkg.com/@types/node/-/node-14.18.63.tgz#1788fa8da838dbb5f9ea994b834278205db6ca2b" integrity sha512-fAtCfv4jJg+ExtXhvCkCqUKZ+4ok/JQk01qDKhL5BDDoS3AxKXhV5/MAVUZyQnSEd2GT92fkgZl0pz0Q0AzcIQ== +"@types/node@^20.14.10": + version "20.14.10" + resolved "https://registry.yarnpkg.com/@types/node/-/node-20.14.10.tgz#a1a218290f1b6428682e3af044785e5874db469a" + integrity sha512-MdiXf+nDuMvY0gJKxyfZ7/6UFsETO7mGKF54MVD/ekJS6HdFtpZFBgrh6Pseu64XTb2MLyFPlbW6hj8HYRQNOQ== + dependencies: + undici-types "~5.26.4" + +"@types/qs@*": + version "6.9.15" + resolved 
"https://registry.yarnpkg.com/@types/qs/-/qs-6.9.15.tgz#adde8a060ec9c305a82de1babc1056e73bd64dce" + integrity sha512-uXHQKES6DQKKCLh441Xv/dwxOq1TVS3JPUMlEqoEglvlhR6Mxnlew/Xq/LRVHpLyk7iK3zODe1qYHIMltO7XGg== + +"@types/range-parser@*": + version "1.2.7" + resolved "https://registry.yarnpkg.com/@types/range-parser/-/range-parser-1.2.7.tgz#50ae4353eaaddc04044279812f52c8c65857dbcb" + integrity sha512-hKormJbkJqzQGhziax5PItDUTMAM9uE2XXQmM37dyd4hVM+5aVl7oVxMVUiVQn2oCQFN/LKCZdvSM0pFRqbSmQ== + +"@types/send@*": + version "0.17.4" + resolved "https://registry.yarnpkg.com/@types/send/-/send-0.17.4.tgz#6619cd24e7270793702e4e6a4b958a9010cfc57a" + integrity sha512-x2EM6TJOybec7c52BX0ZspPodMsQUd5L6PRwOunVyVUhXiBSKf3AezDL8Dgvgt5o0UfKNfuA0eMLr2wLT4AiBA== + dependencies: + "@types/mime" "^1" + "@types/node" "*" + +"@types/serve-static@*": + version "1.15.7" + resolved "https://registry.yarnpkg.com/@types/serve-static/-/serve-static-1.15.7.tgz#22174bbd74fb97fe303109738e9b5c2f3064f714" + integrity sha512-W8Ym+h8nhuRwaKPaDw34QUkwsGi6Rc4yYqvKFo5rm2FUEhCFbzVWrxXUxuKK8TASjWsysJY0nsmNCGhCOIsrOw== + dependencies: + "@types/http-errors" "*" + "@types/node" "*" + "@types/send" "*" + "@types/sinonjs__fake-timers@8.1.1": version "8.1.1" resolved "https://registry.yarnpkg.com/@types/sinonjs__fake-timers/-/sinonjs__fake-timers-8.1.1.tgz#b49c2c70150141a15e0fa7e79cf1f92a72934ce3" @@ -97,6 +278,26 @@ dependencies: "@types/node" "*" +accepts@~1.3.8: + version "1.3.8" + resolved "https://registry.yarnpkg.com/accepts/-/accepts-1.3.8.tgz#0bf0be125b67014adcb0b0921e62db7bffe16b2e" + integrity sha512-PYAthTa2m2VKxuvSD3DPC/Gy+U+sOA1LAuT8mkmRuvw+NACSaeXEQ+NHcVF7rONl6qcaxV3Uuemwawk+7+SJLw== + dependencies: + mime-types "~2.1.34" + negotiator "0.6.3" + +acorn-walk@^8.1.1: + version "8.3.3" + resolved "https://registry.yarnpkg.com/acorn-walk/-/acorn-walk-8.3.3.tgz#9caeac29eefaa0c41e3d4c65137de4d6f34df43e" + integrity sha512-MxXdReSRhGO7VlFe1bRG/oI7/mdLV9B9JJT0N8vZOhF7gFRR5l3M8W9G8JxmKV+JC5mGqJ0QvqfSOLsCPa4nUw== + dependencies: + acorn "^8.11.0" + +acorn@^8.11.0, acorn@^8.4.1: + version "8.12.1" + resolved "https://registry.yarnpkg.com/acorn/-/acorn-8.12.1.tgz#71616bdccbe25e27a54439e0046e89ca76df2248" + integrity sha512-tcpGyI9zbizT9JbV6oYE477V6mTlXvvi0T0G3SNIYE2apm/G5huBa1+K89VGeovbg+jycCrfhl3ADxErOuO6Jg== + aggregate-error@^3.0.0: version "3.1.0" resolved "https://registry.yarnpkg.com/aggregate-error/-/aggregate-error-3.1.0.tgz#92670ff50f5359bdb7a3e0d40d0ec30c5737687a" @@ -122,6 +323,13 @@ ansi-regex@^5.0.1: resolved "https://registry.yarnpkg.com/ansi-regex/-/ansi-regex-5.0.1.tgz#082cb2c89c9fe8659a311a53bd6a4dc5301db304" integrity sha512-quJQXlTSUGL2LH9SUXo8VwsY4soanhgo6LNSm84E1LBcE8s3O0wpdiRzyR9z/ZZJMlMWv37qOOb9pdJlMUEKFQ== +ansi-styles@^3.2.1: + version "3.2.1" + resolved "https://registry.yarnpkg.com/ansi-styles/-/ansi-styles-3.2.1.tgz#41fbb20243e50b12be0f04b8dedbf07520ce841d" + integrity sha512-VT0ZI6kZRdTh8YyJw3SMbYm/u+NqfsAxEpWO0Pf9sq8/e94WxxOpPKx9FR1FlyCtOVDNOQ+8ntlqFxiRc+r5qA== + dependencies: + color-convert "^1.9.0" + ansi-styles@^4.0.0, ansi-styles@^4.1.0: version "4.3.0" resolved "https://registry.yarnpkg.com/ansi-styles/-/ansi-styles-4.3.0.tgz#edd803628ae71c04c85ae7a0906edad34b648937" @@ -129,11 +337,49 @@ ansi-styles@^4.0.0, ansi-styles@^4.1.0: dependencies: color-convert "^2.0.1" +ansi-styles@^5.0.0: + version "5.2.0" + resolved "https://registry.yarnpkg.com/ansi-styles/-/ansi-styles-5.2.0.tgz#07449690ad45777d1924ac2abb2fc8895dba836b" + integrity 
sha512-Cxwpt2SfTzTtXcfOlzGEee8O+c+MmUgGrNiBcXnuWxuFJHe6a5Hz7qwhwe5OgaSYI0IJvkLqWX1ASG+cJOkEiA== + +anymatch@~3.1.2: + version "3.1.3" + resolved "https://registry.yarnpkg.com/anymatch/-/anymatch-3.1.3.tgz#790c58b19ba1720a84205b57c618d5ad8524973e" + integrity sha512-KMReFUr0B4t+D+OBkjR3KYqvocp2XaSzO55UcB6mgQMd3KbcE+mWTyvVV7D/zsdEbNnV6acZUutkiHQXvTr1Rw== + dependencies: + normalize-path "^3.0.0" + picomatch "^2.0.4" + arch@^2.2.0: version "2.2.0" resolved "https://registry.yarnpkg.com/arch/-/arch-2.2.0.tgz#1bc47818f305764f23ab3306b0bfc086c5a29d11" integrity sha512-Of/R0wqp83cgHozfIYLbBMnej79U/SVGOOyuB3VVFv1NRM/PSFMK12x9KVtiYzJqmnU5WR2qp0Z5rHb7sWGnFQ== +arg@^4.1.0: + version "4.1.3" + resolved "https://registry.yarnpkg.com/arg/-/arg-4.1.3.tgz#269fc7ad5b8e42cb63c896d5666017261c144089" + integrity sha512-58S9QDqG0Xx27YwPSt9fJxivjYl432YCwfDMfZ+71RAqUrZef7LrKQZ3LHLOwCS4FLNBplP533Zx895SeOCHvA== + +aria-query@5.1.3: + version "5.1.3" + resolved "https://registry.yarnpkg.com/aria-query/-/aria-query-5.1.3.tgz#19db27cd101152773631396f7a95a3b58c22c35e" + integrity sha512-R5iJ5lkuHybztUfuOAznmboyjWq8O6sqNqtK7CLOqdydi54VNbORp49mb14KbWgG1QD3JFO9hJdZ+y4KutfdOQ== + dependencies: + deep-equal "^2.0.5" + +array-buffer-byte-length@^1.0.0: + version "1.0.1" + resolved "https://registry.yarnpkg.com/array-buffer-byte-length/-/array-buffer-byte-length-1.0.1.tgz#1e5583ec16763540a27ae52eed99ff899223568f" + integrity sha512-ahC5W1xgou+KTXix4sAO8Ki12Q+jf4i0+tmk3sC+zgcynshkHxzpXdImBehiUYKKKDwvfFiJl1tZt6ewscS1Mg== + dependencies: + call-bind "^1.0.5" + is-array-buffer "^3.0.4" + +array-flatten@1.1.1: + version "1.1.1" + resolved "https://registry.yarnpkg.com/array-flatten/-/array-flatten-1.1.1.tgz#9a5f699051b1e7073328f2a008968b64ea2955d2" + integrity sha512-PCVAQswWemu6UdxsDFFX/+gVeYqKAod3D3UVm91jHwynguOwAvYPhx8nNlM++NqRcK6CxxpUafjmhIdKiHibqg== + asn1@~0.2.3: version "0.2.6" resolved "https://registry.yarnpkg.com/asn1/-/asn1-0.2.6.tgz#0d3a7bb6e64e02a90c0303b31f292868ea09a08d" @@ -166,6 +412,13 @@ at-least-node@^1.0.0: resolved "https://registry.yarnpkg.com/at-least-node/-/at-least-node-1.0.0.tgz#602cd4b46e844ad4effc92a8011a3c46e0238dc2" integrity sha512-+q/t7Ekv1EDY2l6Gda6LLiX14rU9TV20Wa3ofeQmwPFZbOMo9DXrLbOjFaaclkXKWidIaopwAObQDqwWtGUjqg== +available-typed-arrays@^1.0.7: + version "1.0.7" + resolved "https://registry.yarnpkg.com/available-typed-arrays/-/available-typed-arrays-1.0.7.tgz#a5cc375d6a03c2efc87a553f3e0b1522def14846" + integrity sha512-wvUjBtSGN7+7SjNpq/9M2Tg350UZD3q62IFZLbRAR1bSMlCo1ZaeW+BJ+D090e4hIIZLBcTDWe4Mh4jvUDajzQ== + dependencies: + possible-typed-array-names "^1.0.0" + aws-sign2@~0.7.0: version "0.7.0" resolved "https://registry.yarnpkg.com/aws-sign2/-/aws-sign2-0.7.0.tgz#b46e890934a9591f2d2f6f86d7e6a9f1b3fe76a8" @@ -183,6 +436,11 @@ axios@^0.25.0: dependencies: follow-redirects "^1.14.7" +balanced-match@^1.0.0: + version "1.0.2" + resolved "https://registry.yarnpkg.com/balanced-match/-/balanced-match-1.0.2.tgz#e83e3a7e3f300b34cb9d87f615fa0cbf357690ee" + integrity sha512-3oSeUO0TMV67hN1AmbXsK4yaqU7tjiHlbxRDZOpH0KW9+CeX4bRAaX0Anxt0tx2MrpRpWwQaPwIlISEJhYU5Pw== + base64-js@^1.3.1: version "1.5.1" resolved "https://registry.yarnpkg.com/base64-js/-/base64-js-1.5.1.tgz#1b1b440160a5bf7ad40b650f095963481903930a" @@ -195,6 +453,11 @@ bcrypt-pbkdf@^1.0.0: dependencies: tweetnacl "^0.14.3" +binary-extensions@^2.0.0: + version "2.3.0" + resolved "https://registry.yarnpkg.com/binary-extensions/-/binary-extensions-2.3.0.tgz#f6e14a97858d327252200242d4ccfe522c445522" + integrity 
sha512-Ceh+7ox5qe7LJuLHoY0feh3pHuUDHAcRUeyL2VYghZwfpkNIy/+8Ocg0a3UuSoYzavmylwuLWQOf3hl0jjMMIw== + blob-util@^2.0.2: version "2.0.2" resolved "https://registry.yarnpkg.com/blob-util/-/blob-util-2.0.2.tgz#3b4e3c281111bb7f11128518006cdc60b403a1eb" @@ -205,6 +468,39 @@ bluebird@^3.7.2: resolved "https://registry.yarnpkg.com/bluebird/-/bluebird-3.7.2.tgz#9f229c15be272454ffa973ace0dbee79a1b0c36f" integrity sha512-XpNj6GDQzdfW+r2Wnn7xiSAd7TM3jzkxGXBGTtWKuSXv1xUV+azxAm8jdWZN06QTQk+2N2XB9jRDkvbmQmcRtg== +body-parser@1.20.2: + version "1.20.2" + resolved "https://registry.yarnpkg.com/body-parser/-/body-parser-1.20.2.tgz#6feb0e21c4724d06de7ff38da36dad4f57a747fd" + integrity sha512-ml9pReCu3M61kGlqoTm2umSXTlRTuGTx0bfYj+uIUKKYycG5NtSbeetV3faSU6R7ajOPw0g/J1PvK4qNy7s5bA== + dependencies: + bytes "3.1.2" + content-type "~1.0.5" + debug "2.6.9" + depd "2.0.0" + destroy "1.2.0" + http-errors "2.0.0" + iconv-lite "0.4.24" + on-finished "2.4.1" + qs "6.11.0" + raw-body "2.5.2" + type-is "~1.6.18" + unpipe "1.0.0" + +brace-expansion@^1.1.7: + version "1.1.11" + resolved "https://registry.yarnpkg.com/brace-expansion/-/brace-expansion-1.1.11.tgz#3c7fcbf529d87226f3d2f52b966ff5271eb441dd" + integrity sha512-iCuPHDFgrHX7H2vEI/5xpz07zSHB00TpugqhmYtVmMO6518mCuRMoOYFldEBl0g187ufozdaHgWKcYFb61qGiA== + dependencies: + balanced-match "^1.0.0" + concat-map "0.0.1" + +braces@~3.0.2: + version "3.0.3" + resolved "https://registry.yarnpkg.com/braces/-/braces-3.0.3.tgz#490332f40919452272d55a8480adc0c441358789" + integrity sha512-yQbXgO/OSZVD2IsiLlro+7Hf6Q18EJrKSEsdoMzKePKXct3gvD8oLcOQdIzGupr5Fj+EDe8gO/lxc1BzfMpxvA== + dependencies: + fill-range "^7.1.1" + buffer-crc32@~0.2.3: version "0.2.13" resolved "https://registry.yarnpkg.com/buffer-crc32/-/buffer-crc32-0.2.13.tgz#0d333e3f00eac50aa1454abd30ef8c2a5d9a7242" @@ -218,12 +514,17 @@ buffer@^5.6.0: base64-js "^1.3.1" ieee754 "^1.1.13" +bytes@3.1.2: + version "3.1.2" + resolved "https://registry.yarnpkg.com/bytes/-/bytes-3.1.2.tgz#8b0beeb98605adf1b128fa4386403c009e0221a5" + integrity sha512-/Nf7TyzTx6S3yRJObOAV7956r8cr2+Oj8AC5dt8wSP3BQAoeX58NoHyCU8P8zGkNXStjTSi6fzO6F0pBdcYbEg== + cachedir@^2.3.0: version "2.4.0" resolved "https://registry.yarnpkg.com/cachedir/-/cachedir-2.4.0.tgz#7fef9cf7367233d7c88068fe6e34ed0d355a610d" integrity sha512-9EtFOZR8g22CL7BWjJ9BUx1+A/djkofnyW3aOXZORNW2kxoUpx2h+uN2cOqwPmFhnpVmxg+KW2OjOSgChTEvsQ== -call-bind@^1.0.7: +call-bind@^1.0.2, call-bind@^1.0.5, call-bind@^1.0.6, call-bind@^1.0.7: version "1.0.7" resolved "https://registry.yarnpkg.com/call-bind/-/call-bind-1.0.7.tgz#06016599c40c56498c18769d2730be242b6fa3b9" integrity sha512-GHTSNSYICQ7scH7sZ+M2rFopRoLh8t2bLSW6BbgrtLsahOIB5iyAVJf9GjWK3cYTDaMj4XdBpM1cA6pIS0Kv2w== @@ -239,6 +540,15 @@ caseless@~0.12.0: resolved "https://registry.yarnpkg.com/caseless/-/caseless-0.12.0.tgz#1b681c21ff84033c826543090689420d187151dc" integrity sha512-4tYFyifaFfGacoiObjJegolkwSU4xQNGbVgUiNYVUxbQ2x2lUsFvY4hVgVzGiIe6WLOPqycWXA40l+PWsxthUw== +chalk@^2.4.2: + version "2.4.2" + resolved "https://registry.yarnpkg.com/chalk/-/chalk-2.4.2.tgz#cd42541677a54333cf541a49108c1432b44c9424" + integrity sha512-Mti+f9lpJNcwF4tWV8/OrTTtF1gZi+f8FqlyAdouralcFWFQWF2+NgCHShjkCb+IFBLq9buZwE1xckQU4peSuQ== + dependencies: + ansi-styles "^3.2.1" + escape-string-regexp "^1.0.5" + supports-color "^5.3.0" + chalk@^4.1.0: version "4.1.2" resolved "https://registry.yarnpkg.com/chalk/-/chalk-4.1.2.tgz#aac4e2b7734a740867aeb16bf02aad556a1e7a01" @@ -252,6 +562,26 @@ check-more-types@^2.24.0: resolved 
"https://registry.yarnpkg.com/check-more-types/-/check-more-types-2.24.0.tgz#1420ffb10fd444dcfc79b43891bbfffd32a84600" integrity sha512-Pj779qHxV2tuapviy1bSZNEL1maXr13bPYpsvSDB68HlYcYuhlDrmGd63i0JHMCLKzc7rUSNIrpdJlhVlNwrxA== +child_process@^1.0.2: + version "1.0.2" + resolved "https://registry.yarnpkg.com/child_process/-/child_process-1.0.2.tgz#b1f7e7fc73d25e7fd1d455adc94e143830182b5a" + integrity sha512-Wmza/JzL0SiWz7kl6MhIKT5ceIlnFPJX+lwUGj7Clhy5MMldsSoJR0+uvRzOS5Kv45Mq7t1PoE8TsOA9bzvb6g== + +chokidar@^3.5.2: + version "3.6.0" + resolved "https://registry.yarnpkg.com/chokidar/-/chokidar-3.6.0.tgz#197c6cc669ef2a8dc5e7b4d97ee4e092c3eb0d5b" + integrity sha512-7VT13fmjotKpGipCW9JEQAusEPE+Ei8nl6/g4FBAmIm0GOOLMua9NDDo/DWp0ZAxCr3cPq5ZpBqmPAQgDda2Pw== + dependencies: + anymatch "~3.1.2" + braces "~3.0.2" + glob-parent "~5.1.2" + is-binary-path "~2.1.0" + is-glob "~4.0.1" + normalize-path "~3.0.0" + readdirp "~3.6.0" + optionalDependencies: + fsevents "~2.3.2" + ci-info@^3.2.0: version "3.9.0" resolved "https://registry.yarnpkg.com/ci-info/-/ci-info-3.9.0.tgz#4279a62028a7b1f262f3473fc9605f5e218c59b4" @@ -286,6 +616,13 @@ cli-truncate@^2.1.0: slice-ansi "^3.0.0" string-width "^4.2.0" +color-convert@^1.9.0: + version "1.9.3" + resolved "https://registry.yarnpkg.com/color-convert/-/color-convert-1.9.3.tgz#bb71850690e1f136567de629d2d5471deda4c1e8" + integrity sha512-QfAUtd+vFdAtFQcC8CCyYt1fYWxSqAiK2cSD6zDB8N3cpsEBAvRxp9zOGg6G/SHHJYAT88/az/IuDGALsNVbGg== + dependencies: + color-name "1.1.3" + color-convert@^2.0.1: version "2.0.1" resolved "https://registry.yarnpkg.com/color-convert/-/color-convert-2.0.1.tgz#72d3a68d598c9bdb3af2ad1e84f21d896abd4de3" @@ -293,6 +630,11 @@ color-convert@^2.0.1: dependencies: color-name "~1.1.4" +color-name@1.1.3: + version "1.1.3" + resolved "https://registry.yarnpkg.com/color-name/-/color-name-1.1.3.tgz#a7d0558bd89c42f795dd42328f740831ca53bc25" + integrity sha512-72fSenhMw2HZMTVHeCA9KCmpEIbzWiQsjN+BHcBbS9vr1mtt+vJjPdksIBNUmKAW8TFUDPJK5SUU3QhE9NEXDw== + color-name@~1.1.4: version "1.1.4" resolved "https://registry.yarnpkg.com/color-name/-/color-name-1.1.4.tgz#c2a09a87acbde69543de6f63fa3995c826c536a2" @@ -320,11 +662,43 @@ common-tags@^1.8.0: resolved "https://registry.yarnpkg.com/common-tags/-/common-tags-1.8.2.tgz#94ebb3c076d26032745fd54face7f688ef5ac9c6" integrity sha512-gk/Z852D2Wtb//0I+kRFNKKE9dIIVirjoqPoA1wJU+XePVXZfGeBpk45+A1rKO4Q43prqWBNY/MiIeRLbPWUaA== +concat-map@0.0.1: + version "0.0.1" + resolved "https://registry.yarnpkg.com/concat-map/-/concat-map-0.0.1.tgz#d8a96bd77fd68df7793a73036a3ba0d5405d477b" + integrity sha512-/Srv4dswyQNBfohGpz9o6Yb3Gz3SrUDqBH5rTuhGR7ahtlbYKnVxw2bCFMRljaA7EXHaXZ8wsHdodFvbkhKmqg== + +content-disposition@0.5.4: + version "0.5.4" + resolved "https://registry.yarnpkg.com/content-disposition/-/content-disposition-0.5.4.tgz#8b82b4efac82512a02bb0b1dcec9d2c5e8eb5bfe" + integrity sha512-FveZTNuGw04cxlAiWbzi6zTAL/lhehaWbTtgluJh4/E95DqMwTmha3KZN1aAWA8cFIhHzMZUvLevkw5Rqk+tSQ== + dependencies: + safe-buffer "5.2.1" + +content-type@~1.0.4, content-type@~1.0.5: + version "1.0.5" + resolved "https://registry.yarnpkg.com/content-type/-/content-type-1.0.5.tgz#8b773162656d1d1086784c8f23a54ce6d73d7918" + integrity sha512-nTjqfcBFEipKdXCv4YDQWCfmcLZKm81ldF0pAopTvyrFGVbcR6P/VAAd5G7N+0tTr8QqiU0tFadD6FK4NtJwOA== + +cookie-signature@1.0.6: + version "1.0.6" + resolved "https://registry.yarnpkg.com/cookie-signature/-/cookie-signature-1.0.6.tgz#e303a882b342cc3ee8ca513a79999734dab3ae2c" + integrity 
sha512-QADzlaHc8icV8I7vbaJXJwod9HWYp8uCqf1xa4OfNu1T7JVxQIrUgOWtHdNDtPiywmFbiS12VjotIXLrKM3orQ== + +cookie@0.6.0: + version "0.6.0" + resolved "https://registry.yarnpkg.com/cookie/-/cookie-0.6.0.tgz#2798b04b071b0ecbff0dbb62a505a8efa4e19051" + integrity sha512-U71cyTamuh1CRNCfpGY6to28lxvNwPG4Guz/EVjgf3Jmzv0vlDp1atT9eS5dDjMYHucpHbWns6Lwf3BKz6svdw== + core-util-is@1.0.2: version "1.0.2" resolved "https://registry.yarnpkg.com/core-util-is/-/core-util-is-1.0.2.tgz#b5fd54220aa2bc5ab57aab7140c940754503c1a7" integrity sha512-3lqz5YjWTYnW6dlDa5TLaTCcShfar1e40rmcJVwCBJC6mWlFuj0eCHIElmG1g5kyuJ/GD+8Wn4FFCcz4gJPfaQ== +create-require@^1.1.0: + version "1.1.1" + resolved "https://registry.yarnpkg.com/create-require/-/create-require-1.1.1.tgz#c1d7e8f1e5f6cfc9ff65f9cd352d37348756c333" + integrity sha512-dcKFX3jn0MpIaXjisoRvexIJVEKzaq7z2rZKxf+MSr9TkdmHmsU4m2lcLojrj/FHl8mk5VxMmYA+ftRkP/3oKQ== + cross-spawn@^7.0.0: version "7.0.3" resolved "https://registry.yarnpkg.com/cross-spawn/-/cross-spawn-7.0.3.tgz#f73a85b9d5d41d045551c177e2882d4ac85728a6" @@ -334,10 +708,10 @@ cross-spawn@^7.0.0: shebang-command "^2.0.0" which "^2.0.1" -cypress@10.8.0: - version "10.8.0" - resolved "https://registry.yarnpkg.com/cypress/-/cypress-10.8.0.tgz#12a681f2642b6f13d636bab65d5b71abdb1497a5" - integrity sha512-QVse0dnLm018hgti2enKMVZR9qbIO488YGX06nH5j3Dg1isL38DwrBtyrax02CANU6y8F4EJUuyW6HJKw1jsFA== +cypress@^11.1.0: + version "11.2.0" + resolved "https://registry.yarnpkg.com/cypress/-/cypress-11.2.0.tgz#63edef8c387b687066c5493f6f0ad7b9ced4b2b7" + integrity sha512-u61UGwtu7lpsNWLUma/FKNOsrjcI6wleNmda/TyKHe0dOBcVjbCPlp1N6uwFZ0doXev7f/91YDpU9bqDCFeBLA== dependencies: "@cypress/request" "^2.88.10" "@cypress/xvfb" "^1.2.4" @@ -394,6 +768,13 @@ dayjs@^1.10.4: resolved "https://registry.yarnpkg.com/dayjs/-/dayjs-1.11.10.tgz#68acea85317a6e164457d6d6947564029a6a16a0" integrity sha512-vjAczensTgRcqDERK0SR2XMwsF/tSvnvlv6VcF2GIhg6Sx4yOIt/irsr1RDJsKiIyBzJDpCoXiWWq28MqH2cnQ== +debug@2.6.9: + version "2.6.9" + resolved "https://registry.yarnpkg.com/debug/-/debug-2.6.9.tgz#5d128515df134ff327e90a4c93f4e077a536341f" + integrity sha512-bC7ElrdJaJnPbAP+1EotYvqZsb3ecl5wi6Bfi6BJTUcNowp6cvspg0jXznRTKDjm/E7AdgFBVeAPVMNcKGsHMA== + dependencies: + ms "2.0.0" + debug@^3.1.0: version "3.2.7" resolved "https://registry.yarnpkg.com/debug/-/debug-3.2.7.tgz#72580b7e9145fb39b6676f9c5e5fb100b934179a" @@ -401,6 +782,13 @@ debug@^3.1.0: dependencies: ms "^2.1.1" +debug@^4: + version "4.3.5" + resolved "https://registry.yarnpkg.com/debug/-/debug-4.3.5.tgz#e83444eceb9fedd4a1da56d671ae2446a01a6e1e" + integrity sha512-pt0bNEmneDIvdL1Xsd9oDQ/wrQRkXDT4AUWlNZNPKvW5x/jyO9VFXkJUP07vQ2upmw5PlaITaPKc31jK13V+jg== + dependencies: + ms "2.1.2" + debug@^4.1.1, debug@^4.3.2: version "4.3.4" resolved "https://registry.yarnpkg.com/debug/-/debug-4.3.4.tgz#1319f6579357f2338d3337d2cdd4914bb5dcc865" @@ -408,7 +796,31 @@ debug@^4.1.1, debug@^4.3.2: dependencies: ms "2.1.2" -define-data-property@^1.1.4: +deep-equal@^2.0.5: + version "2.2.3" + resolved "https://registry.yarnpkg.com/deep-equal/-/deep-equal-2.2.3.tgz#af89dafb23a396c7da3e862abc0be27cf51d56e1" + integrity sha512-ZIwpnevOurS8bpT4192sqAowWM76JDKSHYzMLty3BZGSswgq6pBaH3DhCSW5xVAZICZyKdOBPjwww5wfgT/6PA== + dependencies: + array-buffer-byte-length "^1.0.0" + call-bind "^1.0.5" + es-get-iterator "^1.1.3" + get-intrinsic "^1.2.2" + is-arguments "^1.1.1" + is-array-buffer "^3.0.2" + is-date-object "^1.0.5" + is-regex "^1.1.4" + is-shared-array-buffer "^1.0.2" + isarray "^2.0.5" + object-is "^1.1.5" + object-keys "^1.1.1" + object.assign 
"^4.1.4" + regexp.prototype.flags "^1.5.1" + side-channel "^1.0.4" + which-boxed-primitive "^1.0.2" + which-collection "^1.0.1" + which-typed-array "^1.1.13" + +define-data-property@^1.0.1, define-data-property@^1.1.4: version "1.1.4" resolved "https://registry.yarnpkg.com/define-data-property/-/define-data-property-1.1.4.tgz#894dc141bb7d3060ae4366f6a0107e68fbe48c5e" integrity sha512-rBMvIzlpA8v6E+SJZoo++HAYqsLrkg7MSfIinMPFhmkorw7X+dOXVJQs+QT69zGkzMyfDnIMN2Wid1+NbL3T+A== @@ -417,11 +829,40 @@ define-data-property@^1.1.4: es-errors "^1.3.0" gopd "^1.0.1" +define-properties@^1.2.1: + version "1.2.1" + resolved "https://registry.yarnpkg.com/define-properties/-/define-properties-1.2.1.tgz#10781cc616eb951a80a034bafcaa7377f6af2b6c" + integrity sha512-8QmQKqEASLd5nx0U1B1okLElbUuuttJ/AnYmRXbbbGDWh6uS208EjD4Xqq/I9wK7u0v6O08XhTWnt5XtEbR6Dg== + dependencies: + define-data-property "^1.0.1" + has-property-descriptors "^1.0.0" + object-keys "^1.1.1" + delayed-stream@~1.0.0: version "1.0.0" resolved "https://registry.yarnpkg.com/delayed-stream/-/delayed-stream-1.0.0.tgz#df3ae199acadfb7d440aaae0b29e2272b24ec619" integrity sha512-ZySD7Nf91aLB0RxL4KGrKHBXl7Eds1DAmEdcoVawXnLD7SDhpNgtuII2aAkg7a7QS41jxPSZ17p4VdGnMHk3MQ== +depd@2.0.0: + version "2.0.0" + resolved "https://registry.yarnpkg.com/depd/-/depd-2.0.0.tgz#b696163cc757560d09cf22cc8fad1571b79e76df" + integrity sha512-g7nH6P6dyDioJogAAGprGpCtVImJhpPk/roCzdb3fIh61/s/nPsfR6onyMwkCAR/OlC3yBC0lESvUoQEAssIrw== + +destroy@1.2.0: + version "1.2.0" + resolved "https://registry.yarnpkg.com/destroy/-/destroy-1.2.0.tgz#4803735509ad8be552934c67df614f94e66fa015" + integrity sha512-2sJGJTaXIIaR1w4iJSNoN0hnMY7Gpc/n8D4qSCJw8QqFWXf7cuAgnEHxBpweaVcPevC2l3KpjYCx3NypQQgaJg== + +diff@^4.0.1: + version "4.0.2" + resolved "https://registry.yarnpkg.com/diff/-/diff-4.0.2.tgz#60f3aecb89d5fae520c11aa19efc2bb982aade7d" + integrity sha512-58lmxKSA4BNyLz+HHMUzlOEpg09FV+ev6ZMe3vJihgdxzgcwZ8VoEEPmALCZG9LmqfVoNMMKpttIYTVG6uDY7A== + +dom-accessibility-api@^0.5.9: + version "0.5.16" + resolved "https://registry.yarnpkg.com/dom-accessibility-api/-/dom-accessibility-api-0.5.16.tgz#5a7429e6066eb3664d911e33fb0e45de8eb08453" + integrity sha512-X7BJ2yElsnOJ30pZF4uIIDfBEVgF4XEBxL9Bxhy6dnrm5hkzqmsWHGTiHqRiITNhMyFLyAiWndIJP7Z1NTteDg== + ecc-jsbn@~0.1.1: version "0.1.2" resolved "https://registry.yarnpkg.com/ecc-jsbn/-/ecc-jsbn-0.1.2.tgz#3a83a904e54353287874c564b7549386849a98c9" @@ -430,11 +871,21 @@ ecc-jsbn@~0.1.1: jsbn "~0.1.0" safer-buffer "^2.1.0" +ee-first@1.1.1: + version "1.1.1" + resolved "https://registry.yarnpkg.com/ee-first/-/ee-first-1.1.1.tgz#590c61156b0ae2f4f0255732a158b266bc56b21d" + integrity sha512-WMwm9LhRUo+WUaRN+vRuETqG89IgZphVSNkdFgeb6sS/E4OrDIN7t48CAewSHXc6C8lefD8KKfr5vY61brQlow== + emoji-regex@^8.0.0: version "8.0.0" resolved "https://registry.yarnpkg.com/emoji-regex/-/emoji-regex-8.0.0.tgz#e818fd69ce5ccfcb404594f842963bf53164cc37" integrity sha512-MSjYzcWNOA0ewAHpz0MxpYFvwg6yjy1NG3xteoqz644VCo/RPgnr1/GGt+ic3iJTzQ8Eu3TdM14SawnVUmGE6A== +encodeurl@~1.0.2: + version "1.0.2" + resolved "https://registry.yarnpkg.com/encodeurl/-/encodeurl-1.0.2.tgz#ad3ff4c86ec2d029322f5a02c3a9a606c95b3f59" + integrity sha512-TPJXq8JqFaVYm2CWmPvnP2Iyo4ZSM7/QKcSmuMLDObfpH5fi7RUGmd/rTDf+rut/saiDiQEeVTNgAmJEdAOx0w== + end-of-stream@^1.1.0: version "1.4.4" resolved "https://registry.yarnpkg.com/end-of-stream/-/end-of-stream-1.4.4.tgz#5ae64a5f45057baf3626ec14da0ca5e4b2431eb0" @@ -462,11 +913,36 @@ es-errors@^1.3.0: resolved 
"https://registry.yarnpkg.com/es-errors/-/es-errors-1.3.0.tgz#05f75a25dab98e4fb1dcd5e1472c0546d5057c8f" integrity sha512-Zf5H2Kxt2xjTvbJvP2ZWLEICxA6j+hAmMzIlypy4xcBg1vKVnx89Wy0GbS+kf5cwCVFFzdCFh2XSCFNULS6csw== +es-get-iterator@^1.1.3: + version "1.1.3" + resolved "https://registry.yarnpkg.com/es-get-iterator/-/es-get-iterator-1.1.3.tgz#3ef87523c5d464d41084b2c3c9c214f1199763d6" + integrity sha512-sPZmqHBe6JIiTfN5q2pEi//TwxmAFHwj/XEuYjTuse78i8KxaqMTTzxPoFKuzRpDpTJ+0NAbpfenkmH2rePtuw== + dependencies: + call-bind "^1.0.2" + get-intrinsic "^1.1.3" + has-symbols "^1.0.3" + is-arguments "^1.1.1" + is-map "^2.0.2" + is-set "^2.0.2" + is-string "^1.0.7" + isarray "^2.0.5" + stop-iteration-iterator "^1.0.0" + +escape-html@~1.0.3: + version "1.0.3" + resolved "https://registry.yarnpkg.com/escape-html/-/escape-html-1.0.3.tgz#0258eae4d3d0c0974de1c169188ef0051d1d1988" + integrity sha512-NiSupZ4OeuGwr68lGIeym/ksIZMJodUGOSCZ/FSnTxcrekbvqrgdUxlJOMpijaKZVjAJrWrGs/6Jy8OMuyj9ow== + escape-string-regexp@^1.0.5: version "1.0.5" resolved "https://registry.yarnpkg.com/escape-string-regexp/-/escape-string-regexp-1.0.5.tgz#1b61c0562190a8dff6ae3bb2cf0200ca130b86d4" integrity sha512-vbRorB5FUQWvla16U8R/qgaFIya2qGzwDrNmCZuYKrbdSUMG6I1ZCGQRefkRVhuOkIGVne7BQ35DSfo1qvJqFg== +etag@~1.8.1: + version "1.8.1" + resolved "https://registry.yarnpkg.com/etag/-/etag-1.8.1.tgz#41ae2eeb65efa62268aebfea83ac7d79299b0887" + integrity sha512-aIL5Fx7mawVa300al2BnEE4iNvo1qETxLrPI/o05L7z6go7fCw1J6EQmbK4FmJ2AS7kgVF/KEZWufBfdClMcPg== + eventemitter2@6.4.7: version "6.4.7" resolved "https://registry.yarnpkg.com/eventemitter2/-/eventemitter2-6.4.7.tgz#a7f6c4d7abf28a14c1ef3442f21cb306a054271d" @@ -494,6 +970,43 @@ executable@^4.1.1: dependencies: pify "^2.2.0" +express@^4.19.2: + version "4.19.2" + resolved "https://registry.yarnpkg.com/express/-/express-4.19.2.tgz#e25437827a3aa7f2a827bc8171bbbb664a356465" + integrity sha512-5T6nhjsT+EOMzuck8JjBHARTHfMht0POzlA60WV2pMD3gyXw2LZnZ+ueGdNxG+0calOJcWKbpFcuzLZ91YWq9Q== + dependencies: + accepts "~1.3.8" + array-flatten "1.1.1" + body-parser "1.20.2" + content-disposition "0.5.4" + content-type "~1.0.4" + cookie "0.6.0" + cookie-signature "1.0.6" + debug "2.6.9" + depd "2.0.0" + encodeurl "~1.0.2" + escape-html "~1.0.3" + etag "~1.8.1" + finalhandler "1.2.0" + fresh "0.5.2" + http-errors "2.0.0" + merge-descriptors "1.0.1" + methods "~1.1.2" + on-finished "2.4.1" + parseurl "~1.3.3" + path-to-regexp "0.1.7" + proxy-addr "~2.0.7" + qs "6.11.0" + range-parser "~1.2.1" + safe-buffer "5.2.1" + send "0.18.0" + serve-static "1.15.0" + setprototypeof "1.2.0" + statuses "2.0.1" + type-is "~1.6.18" + utils-merge "1.0.1" + vary "~1.1.2" + extend@~3.0.2: version "3.0.2" resolved "https://registry.yarnpkg.com/extend/-/extend-3.0.2.tgz#f8b1136b4071fbd8eb140aff858b1019ec2915fa" @@ -534,11 +1047,38 @@ figures@^3.2.0: dependencies: escape-string-regexp "^1.0.5" +fill-range@^7.1.1: + version "7.1.1" + resolved "https://registry.yarnpkg.com/fill-range/-/fill-range-7.1.1.tgz#44265d3cac07e3ea7dc247516380643754a05292" + integrity sha512-YsGpe3WHLK8ZYi4tWDg2Jy3ebRz2rXowDxnld4bkQB00cc/1Zw9AWnC0i9ztDJitivtQvaI9KaLyKrc+hBW0yg== + dependencies: + to-regex-range "^5.0.1" + +finalhandler@1.2.0: + version "1.2.0" + resolved "https://registry.yarnpkg.com/finalhandler/-/finalhandler-1.2.0.tgz#7d23fe5731b207b4640e4fcd00aec1f9207a7b32" + integrity sha512-5uXcUVftlQMFnWC9qu/svkWv3GTd2PfUhK/3PLkYNAe7FbqJMt3515HaxE6eRL74GdsriiwujiawdaB1BpEISg== + dependencies: + debug "2.6.9" + encodeurl "~1.0.2" + escape-html "~1.0.3" + on-finished 
"2.4.1" + parseurl "~1.3.3" + statuses "2.0.1" + unpipe "~1.0.0" + follow-redirects@^1.14.7: version "1.15.6" resolved "https://registry.yarnpkg.com/follow-redirects/-/follow-redirects-1.15.6.tgz#7f815c0cda4249c74ff09e95ef97c23b5fd0399b" integrity sha512-wWN62YITEaOpSK584EZXJafH1AGpO8RVgElfkuXbTOrPX4fIfOyEpW/CsiNd8JdYrAoOvafRTOEnvsO++qCqFA== +for-each@^0.3.3: + version "0.3.3" + resolved "https://registry.yarnpkg.com/for-each/-/for-each-0.3.3.tgz#69b447e88a0a5d32c3e7084f3f1710034b21376e" + integrity sha512-jqYfLp7mo9vIyQf8ykW2v7A+2N4QjeCeI5+Dz9XraiO1ign81wjiH7Fb9vSOWvQfNtmSa4H2RoQTrrXivdUZmw== + dependencies: + is-callable "^1.1.3" + forever-agent@~0.6.1: version "0.6.1" resolved "https://registry.yarnpkg.com/forever-agent/-/forever-agent-0.6.1.tgz#fbc71f0c41adeb37f96c577ad1ed42d8fdacca91" @@ -553,6 +1093,16 @@ form-data@~2.3.2: combined-stream "^1.0.6" mime-types "^2.1.12" +forwarded@0.2.0: + version "0.2.0" + resolved "https://registry.yarnpkg.com/forwarded/-/forwarded-0.2.0.tgz#2269936428aad4c15c7ebe9779a84bf0b2a81811" + integrity sha512-buRG0fpBtRHSTCOASe6hD258tEubFoRLb4ZNA6NxMVHNw2gOcwHo9wyablzMzOA5z9xA9L1KNjk/Nt6MT9aYow== + +fresh@0.5.2: + version "0.5.2" + resolved "https://registry.yarnpkg.com/fresh/-/fresh-0.5.2.tgz#3d8cadd90d976569fa835ab1f8e4b23a105605a7" + integrity sha512-zJ2mQYM18rEFOudeV4GShTGIQ7RbzA7ozbU9I/XBpm7kqgMywgmylMwXHxZJmkVoYkna9d2pVXVXPdYTP9ej8Q== + fs-extra@^9.1.0: version "9.1.0" resolved "https://registry.yarnpkg.com/fs-extra/-/fs-extra-9.1.0.tgz#5954460c764a8da2094ba3554bf839e6b9a7c86d" @@ -563,12 +1113,22 @@ fs-extra@^9.1.0: jsonfile "^6.0.1" universalify "^2.0.0" +fsevents@~2.3.2: + version "2.3.3" + resolved "https://registry.yarnpkg.com/fsevents/-/fsevents-2.3.3.tgz#cac6407785d03675a2a5e1a5305c697b347d90d6" + integrity sha512-5xoDfX+fL7faATnagmWPpbFtwh/R77WmMMqqHGS65C3vvB0YHrgF+B1YmZ3441tMj5n63k0212XNoJwzlhffQw== + function-bind@^1.1.2: version "1.1.2" resolved "https://registry.yarnpkg.com/function-bind/-/function-bind-1.1.2.tgz#2c02d864d97f3ea6c8830c464cbd11ab6eab7a1c" integrity sha512-7XHNxH7qX9xG5mIwxkhumTox/MIRNcOgDrxWsMt2pAr23WHp6MrRlN7FBSFpCpr+oVO0F744iUgR82nJMfG2SA== -get-intrinsic@^1.1.3, get-intrinsic@^1.2.4: +functions-have-names@^1.2.3: + version "1.2.3" + resolved "https://registry.yarnpkg.com/functions-have-names/-/functions-have-names-1.2.3.tgz#0404fe4ee2ba2f607f0e0ec3c80bae994133b834" + integrity sha512-xckBUXyTIqT97tq2x2AMb+g163b5JFysYk0x4qxNFwbfQkmNZoiRHb6sPzI9/QV33WeuvVYBUIiD4NzNIyqaRQ== + +get-intrinsic@^1.1.3, get-intrinsic@^1.2.1, get-intrinsic@^1.2.2, get-intrinsic@^1.2.4: version "1.2.4" resolved "https://registry.yarnpkg.com/get-intrinsic/-/get-intrinsic-1.2.4.tgz#e385f5a4b5227d449c3eabbad05494ef0abbeadd" integrity sha512-5uYhsJH8VJBTv7oslg4BznJYhDoRI6waYCxMmCdnTrcCrHA/fCFKoTFz2JKKE0HdDFUF7/oQuhzumXJK7paBRQ== @@ -600,6 +1160,13 @@ getpass@^0.1.1: dependencies: assert-plus "^1.0.0" +glob-parent@~5.1.2: + version "5.1.2" + resolved "https://registry.yarnpkg.com/glob-parent/-/glob-parent-5.1.2.tgz#869832c58034fe68a4093c17dc15e8340d8401c4" + integrity sha512-AOIgSQCepiJYwP3ARnGx+5VnTu2HBYdzbGP45eLw1vr3zB3vZLeyed1sC9hnbcOc9/SrMyM5RPQrkGz4aS9Zow== + dependencies: + is-glob "^4.0.1" + global-dirs@^3.0.0: version "3.0.1" resolved "https://registry.yarnpkg.com/global-dirs/-/global-dirs-3.0.1.tgz#0c488971f066baceda21447aecb1a8b911d22485" @@ -619,12 +1186,22 @@ graceful-fs@^4.1.6, graceful-fs@^4.2.0: resolved "https://registry.yarnpkg.com/graceful-fs/-/graceful-fs-4.2.11.tgz#4183e4e8bf08bb6e05bbb2f7d2e0c8f712ca40e3" integrity 
sha512-RbJ5/jmFcNNCcDV5o9eTnBLJ/HszWV0P73bc+Ff4nS/rJj+YaS6IGyiOL0VoBYX+l1Wrl3k63h/KrH+nhJ0XvQ== +has-bigints@^1.0.1: + version "1.0.2" + resolved "https://registry.yarnpkg.com/has-bigints/-/has-bigints-1.0.2.tgz#0871bd3e3d51626f6ca0966668ba35d5602d6eaa" + integrity sha512-tSvCKtBr9lkF0Ex0aQiP9N+OpV4zi2r/Nee5VkRDbaqv35RLYMzbwQfFSZZH0kR+Rd6302UJZ2p/bJCEoR3VoQ== + +has-flag@^3.0.0: + version "3.0.0" + resolved "https://registry.yarnpkg.com/has-flag/-/has-flag-3.0.0.tgz#b5d454dc2199ae225699f3467e5a07f3b955bafd" + integrity sha512-sKJf1+ceQBr4SMkvQnBDNDtf4TXpVhVGateu0t918bl30FnbE2m4vNLX+VWe/dpjlb+HugGYzW7uQXH98HPEYw== + has-flag@^4.0.0: version "4.0.0" resolved "https://registry.yarnpkg.com/has-flag/-/has-flag-4.0.0.tgz#944771fd9c81c81265c4d6941860da06bb59479b" integrity sha512-EykJT/Q1KjTWctppgIAgfSO0tKVuZUjhgMr17kqTumMl6Afv3EISleU7qZUzoXDFTAHTDC4NOoG/ZxU3EvlMPQ== -has-property-descriptors@^1.0.2: +has-property-descriptors@^1.0.0, has-property-descriptors@^1.0.2: version "1.0.2" resolved "https://registry.yarnpkg.com/has-property-descriptors/-/has-property-descriptors-1.0.2.tgz#963ed7d071dc7bf5f084c5bfbe0d1b6222586854" integrity sha512-55JNKuIW+vq4Ke1BjOTjM2YctQIvCT7GFzHwmfZPGo5wnrgkid0YQtnAleFSqumZm4az3n2BS+erby5ipJdgrg== @@ -636,11 +1213,18 @@ has-proto@^1.0.1: resolved "https://registry.yarnpkg.com/has-proto/-/has-proto-1.0.3.tgz#b31ddfe9b0e6e9914536a6ab286426d0214f77fd" integrity sha512-SJ1amZAJUiZS+PhsVLf5tGydlaVB8EdFpaSO4gmiUKUOxk8qzn5AIy4ZeJUmh22znIdk/uMAUT2pl3FxzVUH+Q== -has-symbols@^1.0.3: +has-symbols@^1.0.2, has-symbols@^1.0.3: version "1.0.3" resolved "https://registry.yarnpkg.com/has-symbols/-/has-symbols-1.0.3.tgz#bb7b2c4349251dce87b125f7bdf874aa7c8b39f8" integrity sha512-l3LCuF6MgDNwTDKkdYGEihYjt5pRPbEg46rtlmnSPlUbgmB8LOIrKJbYYFBSbnPaJexMKtiPO8hmeRjRz2Td+A== +has-tostringtag@^1.0.0, has-tostringtag@^1.0.2: + version "1.0.2" + resolved "https://registry.yarnpkg.com/has-tostringtag/-/has-tostringtag-1.0.2.tgz#2cdc42d40bef2e5b4eeab7c01a73c54ce7ab5abc" + integrity sha512-NqADB8VjPFLM2V0VvHUewwwsw0ZWBaIdgo+ieHtK3hasLz4qeCRjYcqfB6AQrBggRKppKF8L52/VqdVsO47Dlw== + dependencies: + has-symbols "^1.0.3" + hasown@^2.0.0: version "2.0.2" resolved "https://registry.yarnpkg.com/hasown/-/hasown-2.0.2.tgz#003eaf91be7adc372e84ec59dc37252cedb80003" @@ -648,6 +1232,17 @@ hasown@^2.0.0: dependencies: function-bind "^1.1.2" +http-errors@2.0.0: + version "2.0.0" + resolved "https://registry.yarnpkg.com/http-errors/-/http-errors-2.0.0.tgz#b7774a1486ef73cf7667ac9ae0858c012c57b9d3" + integrity sha512-FtwrG/euBzaEjYeRqOgly7G0qviiXoJWnvEH2Z1plBdXgbyjv34pHTSb9zoeHMyDy33+DWy5Wt9Wo+TURtOYSQ== + dependencies: + depd "2.0.0" + inherits "2.0.4" + setprototypeof "1.2.0" + statuses "2.0.1" + toidentifier "1.0.1" + http-signature@~1.3.6: version "1.3.6" resolved "https://registry.yarnpkg.com/http-signature/-/http-signature-1.3.6.tgz#cb6fbfdf86d1c974f343be94e87f7fc128662cf9" @@ -662,21 +1257,95 @@ human-signals@^1.1.1: resolved "https://registry.yarnpkg.com/human-signals/-/human-signals-1.1.1.tgz#c5b1cd14f50aeae09ab6c59fe63ba3395fe4dfa3" integrity sha512-SEQu7vl8KjNL2eoGBLF3+wAjpsNfA9XMlXAYj/3EdaNfAlxKthD1xjEQfGOUhllCGGJVNY34bRr6lPINhNjyZw== +iconv-lite@0.4.24: + version "0.4.24" + resolved "https://registry.yarnpkg.com/iconv-lite/-/iconv-lite-0.4.24.tgz#2022b4b25fbddc21d2f524974a474aafe733908b" + integrity sha512-v3MXnZAcvnywkTUEZomIActle7RXXeedOR31wwl7VlyoXO4Qi9arvSenNQWne1TcRwhCL1HwLI21bEqdpj8/rA== + dependencies: + safer-buffer ">= 2.1.2 < 3" + ieee754@^1.1.13: version "1.2.1" resolved 
"https://registry.yarnpkg.com/ieee754/-/ieee754-1.2.1.tgz#8eb7a10a63fff25d15a57b001586d177d1b0d352" integrity sha512-dcyqhDvX1C46lXZcVqCpK+FtMRQVdIMN6/Df5js2zouUsqG7I6sFxitIC+7KYK29KdXOLHdu9zL4sFnoVQnqaA== +ignore-by-default@^1.0.1: + version "1.0.1" + resolved "https://registry.yarnpkg.com/ignore-by-default/-/ignore-by-default-1.0.1.tgz#48ca6d72f6c6a3af00a9ad4ae6876be3889e2b09" + integrity sha512-Ius2VYcGNk7T90CppJqcIkS5ooHUZyIQK+ClZfMfMNFEF9VSE73Fq+906u/CWu92x4gzZMWOwfFYckPObzdEbA== + indent-string@^4.0.0: version "4.0.0" resolved "https://registry.yarnpkg.com/indent-string/-/indent-string-4.0.0.tgz#624f8f4497d619b2d9768531d58f4122854d7251" integrity sha512-EdDDZu4A2OyIK7Lr/2zG+w5jmbuk1DVBnEwREQvBzspBJkCEbRa8GxU1lghYcaGJCnRWibjDXlq779X1/y5xwg== +inherits@2.0.4: + version "2.0.4" + resolved "https://registry.yarnpkg.com/inherits/-/inherits-2.0.4.tgz#0fa2c64f932917c3433a0ded55363aae37416b7c" + integrity sha512-k/vGaX4/Yla3WzyMCvTQOXYeIHvqOKtnqBduzTHpzpQZzAskKMhZ2K+EnBiSM9zGSoIFeMpXKxa4dYeZIQqewQ== + ini@2.0.0: version "2.0.0" resolved "https://registry.yarnpkg.com/ini/-/ini-2.0.0.tgz#e5fd556ecdd5726be978fa1001862eacb0a94bc5" integrity sha512-7PnF4oN3CvZF23ADhA5wRaYEQpJ8qygSkbtTXWBeXWXmEVRXK+1ITciHWwHhsjv1TmW0MgacIv6hEi5pX5NQdA== +internal-slot@^1.0.4: + version "1.0.7" + resolved "https://registry.yarnpkg.com/internal-slot/-/internal-slot-1.0.7.tgz#c06dcca3ed874249881007b0a5523b172a190802" + integrity sha512-NGnrKwXzSms2qUUih/ILZ5JBqNTSa1+ZmP6flaIp6KmSElgE9qdndzS3cqjrDovwFdmwsGsLdeFgB6suw+1e9g== + dependencies: + es-errors "^1.3.0" + hasown "^2.0.0" + side-channel "^1.0.4" + +ipaddr.js@1.9.1: + version "1.9.1" + resolved "https://registry.yarnpkg.com/ipaddr.js/-/ipaddr.js-1.9.1.tgz#bff38543eeb8984825079ff3a2a8e6cbd46781b3" + integrity sha512-0KI/607xoxSToH7GjN1FfSbLoU0+btTicjsQSWQlh/hZykN8KpmMf7uYwPW3R+akZ6R/w18ZlXSHBYXiYUPO3g== + +is-arguments@^1.1.1: + version "1.1.1" + resolved "https://registry.yarnpkg.com/is-arguments/-/is-arguments-1.1.1.tgz#15b3f88fda01f2a97fec84ca761a560f123efa9b" + integrity sha512-8Q7EARjzEnKpt/PCD7e1cgUS0a6X8u5tdSiMqXhojOdoV9TsMsiO+9VLC5vAmO8N7/GmXn7yjR8qnA6bVAEzfA== + dependencies: + call-bind "^1.0.2" + has-tostringtag "^1.0.0" + +is-array-buffer@^3.0.2, is-array-buffer@^3.0.4: + version "3.0.4" + resolved "https://registry.yarnpkg.com/is-array-buffer/-/is-array-buffer-3.0.4.tgz#7a1f92b3d61edd2bc65d24f130530ea93d7fae98" + integrity sha512-wcjaerHw0ydZwfhiKbXJWLDY8A7yV7KhjQOpb83hGgGfId/aQa4TOvwyzn2PuswW2gPCYEL/nEAiSVpdOj1lXw== + dependencies: + call-bind "^1.0.2" + get-intrinsic "^1.2.1" + +is-bigint@^1.0.1: + version "1.0.4" + resolved "https://registry.yarnpkg.com/is-bigint/-/is-bigint-1.0.4.tgz#08147a1875bc2b32005d41ccd8291dffc6691df3" + integrity sha512-zB9CruMamjym81i2JZ3UMn54PKGsQzsJeo6xvN3HJJ4CAsQNB6iRutp2To77OfCNuoxspsIhzaPoO1zyCEhFOg== + dependencies: + has-bigints "^1.0.1" + +is-binary-path@~2.1.0: + version "2.1.0" + resolved "https://registry.yarnpkg.com/is-binary-path/-/is-binary-path-2.1.0.tgz#ea1f7f3b80f064236e83470f86c09c254fb45b09" + integrity sha512-ZMERYes6pDydyuGidse7OsHxtbI7WVeUEozgR/g7rd0xUimYNlvZRE/K2MgZTjWy725IfelLeVcEM97mmtRGXw== + dependencies: + binary-extensions "^2.0.0" + +is-boolean-object@^1.1.0: + version "1.1.2" + resolved "https://registry.yarnpkg.com/is-boolean-object/-/is-boolean-object-1.1.2.tgz#5c6dc200246dd9321ae4b885a114bb1f75f63719" + integrity sha512-gDYaKHJmnj4aWxyj6YHyXVpdQawtVLHU5cb+eztPGczf6cjuTdwve5ZIEfgXqH4e57An1D1AKf8CZ3kYrQRqYA== + dependencies: + call-bind "^1.0.2" + has-tostringtag "^1.0.0" + 
+is-callable@^1.1.3: + version "1.2.7" + resolved "https://registry.yarnpkg.com/is-callable/-/is-callable-1.2.7.tgz#3bc2a85ea742d9e36205dcacdd72ca1fdc51b055" + integrity sha512-1BC0BVFhS/p0qtw6enp8e+8OD0UrK0oFLztSjNzhcKA3WDuJxxAPXzPuPtKkjEY9UUoEWlX/8fgKeu2S8i9JTA== + is-ci@^3.0.0: version "3.0.1" resolved "https://registry.yarnpkg.com/is-ci/-/is-ci-3.0.1.tgz#db6ecbed1bd659c43dac0f45661e7674103d1867" @@ -684,11 +1353,30 @@ is-ci@^3.0.0: dependencies: ci-info "^3.2.0" +is-date-object@^1.0.5: + version "1.0.5" + resolved "https://registry.yarnpkg.com/is-date-object/-/is-date-object-1.0.5.tgz#0841d5536e724c25597bf6ea62e1bd38298df31f" + integrity sha512-9YQaSxsAiSwcvS33MBk3wTCVnWK+HhF8VZR2jRxehM16QcVOdHqPn4VPHmRK4lSr38n9JriurInLcP90xsYNfQ== + dependencies: + has-tostringtag "^1.0.0" + +is-extglob@^2.1.1: + version "2.1.1" + resolved "https://registry.yarnpkg.com/is-extglob/-/is-extglob-2.1.1.tgz#a88c02535791f02ed37c76a1b9ea9773c833f8c2" + integrity sha512-SbKbANkN603Vi4jEZv49LeVJMn4yGwsbzZworEoyEiutsN3nJYdbO36zfhGJ6QEDpOZIFkDtnq5JRxmvl3jsoQ== + is-fullwidth-code-point@^3.0.0: version "3.0.0" resolved "https://registry.yarnpkg.com/is-fullwidth-code-point/-/is-fullwidth-code-point-3.0.0.tgz#f116f8064fe90b3f7844a38997c0b75051269f1d" integrity sha512-zymm5+u+sCsSWyD9qNaejV3DFvhCKclKdizYaJUuHA83RLjb7nSuGnddCHGv0hk+KY7BMAlsWeK4Ueg6EV6XQg== +is-glob@^4.0.1, is-glob@~4.0.1: + version "4.0.3" + resolved "https://registry.yarnpkg.com/is-glob/-/is-glob-4.0.3.tgz#64f61e42cbbb2eec2071a9dac0b28ba1e65d5084" + integrity sha512-xelSayHH36ZgE7ZWhli7pW34hNbNl8Ojv5KVmkJD4hBdD3th8Tfk9vYasLM+mXWOZhFkgZfxhLSnrwRr4elSSg== + dependencies: + is-extglob "^2.1.1" + is-installed-globally@~0.4.0: version "0.4.0" resolved "https://registry.yarnpkg.com/is-installed-globally/-/is-installed-globally-0.4.0.tgz#9a0fd407949c30f86eb6959ef1b7994ed0b7b520" @@ -697,16 +1385,67 @@ is-installed-globally@~0.4.0: global-dirs "^3.0.0" is-path-inside "^3.0.2" +is-map@^2.0.2, is-map@^2.0.3: + version "2.0.3" + resolved "https://registry.yarnpkg.com/is-map/-/is-map-2.0.3.tgz#ede96b7fe1e270b3c4465e3a465658764926d62e" + integrity sha512-1Qed0/Hr2m+YqxnM09CjA2d/i6YZNfF6R2oRAOj36eUdS6qIV/huPJNSEpKbupewFs+ZsJlxsjjPbc0/afW6Lw== + +is-number-object@^1.0.4: + version "1.0.7" + resolved "https://registry.yarnpkg.com/is-number-object/-/is-number-object-1.0.7.tgz#59d50ada4c45251784e9904f5246c742f07a42fc" + integrity sha512-k1U0IRzLMo7ZlYIfzRu23Oh6MiIFasgpb9X76eqfFZAqwH44UI4KTBvBYIZ1dSL9ZzChTB9ShHfLkR4pdW5krQ== + dependencies: + has-tostringtag "^1.0.0" + +is-number@^7.0.0: + version "7.0.0" + resolved "https://registry.yarnpkg.com/is-number/-/is-number-7.0.0.tgz#7535345b896734d5f80c4d06c50955527a14f12b" + integrity sha512-41Cifkg6e8TylSpdtTpeLVMqvSBEVzTttHvERD741+pnZ8ANv0004MRL43QKPDlK9cGvNp6NZWZUBlbGXYxxng== + is-path-inside@^3.0.2: version "3.0.3" resolved "https://registry.yarnpkg.com/is-path-inside/-/is-path-inside-3.0.3.tgz#d231362e53a07ff2b0e0ea7fed049161ffd16283" integrity sha512-Fd4gABb+ycGAmKou8eMftCupSir5lRxqf4aD/vd0cD2qc4HL07OjCeuHMr8Ro4CoMaeCKDB0/ECBOVWjTwUvPQ== +is-regex@^1.1.4: + version "1.1.4" + resolved "https://registry.yarnpkg.com/is-regex/-/is-regex-1.1.4.tgz#eef5663cd59fa4c0ae339505323df6854bb15958" + integrity sha512-kvRdxDsxZjhzUX07ZnLydzS1TU/TJlTUHHY4YLL87e37oUA49DfkLqgy+VjFocowy29cKvcSiu+kIv728jTTVg== + dependencies: + call-bind "^1.0.2" + has-tostringtag "^1.0.0" + +is-set@^2.0.2, is-set@^2.0.3: + version "2.0.3" + resolved "https://registry.yarnpkg.com/is-set/-/is-set-2.0.3.tgz#8ab209ea424608141372ded6e0cb200ef1d9d01d" + 
integrity sha512-iPAjerrse27/ygGLxw+EBR9agv9Y6uLeYVJMu+QNCoouJ1/1ri0mGrcWpfCqFZuzzx3WjtwxG098X+n4OuRkPg== + +is-shared-array-buffer@^1.0.2: + version "1.0.3" + resolved "https://registry.yarnpkg.com/is-shared-array-buffer/-/is-shared-array-buffer-1.0.3.tgz#1237f1cba059cdb62431d378dcc37d9680181688" + integrity sha512-nA2hv5XIhLR3uVzDDfCIknerhx8XUKnstuOERPNNIinXG7v9u+ohXF67vxm4TPTEPU6lm61ZkwP3c9PCB97rhg== + dependencies: + call-bind "^1.0.7" + is-stream@^2.0.0: version "2.0.1" resolved "https://registry.yarnpkg.com/is-stream/-/is-stream-2.0.1.tgz#fac1e3d53b97ad5a9d0ae9cef2389f5810a5c077" integrity sha512-hFoiJiTl63nn+kstHGBtewWSKnQLpyb155KHheA1l39uvtO9nWIop1p3udqPcUd/xbF1VLMO4n7OI6p7RbngDg== +is-string@^1.0.5, is-string@^1.0.7: + version "1.0.7" + resolved "https://registry.yarnpkg.com/is-string/-/is-string-1.0.7.tgz#0dd12bf2006f255bb58f695110eff7491eebc0fd" + integrity sha512-tE2UXzivje6ofPW7l23cjDOMa09gb7xlAqG6jG5ej6uPV32TlWP3NKPigtaGeHNu9fohccRYvIiZMfOOnOYUtg== + dependencies: + has-tostringtag "^1.0.0" + +is-symbol@^1.0.3: + version "1.0.4" + resolved "https://registry.yarnpkg.com/is-symbol/-/is-symbol-1.0.4.tgz#a6dac93b635b063ca6872236de88910a57af139c" + integrity sha512-C/CPBqKWnvdcxqIARxyOh4v1UUEOCHpgDa0WYgpKDFMszcrPcffg5uhwSgPCLD2WWxmq6isisz87tzT01tuGhg== + dependencies: + has-symbols "^1.0.2" + is-typedarray@~1.0.0: version "1.0.0" resolved "https://registry.yarnpkg.com/is-typedarray/-/is-typedarray-1.0.0.tgz#e479c80858df0c1b11ddda6940f96011fcda4a9a" @@ -717,6 +1456,24 @@ is-unicode-supported@^0.1.0: resolved "https://registry.yarnpkg.com/is-unicode-supported/-/is-unicode-supported-0.1.0.tgz#3f26c76a809593b52bfa2ecb5710ed2779b522a7" integrity sha512-knxG2q4UC3u8stRGyAVJCOdxFmv5DZiRcdlIaAQXAbSfJya+OhopNotLQrstBhququ4ZpuKbDc/8S6mgXgPFPw== +is-weakmap@^2.0.2: + version "2.0.2" + resolved "https://registry.yarnpkg.com/is-weakmap/-/is-weakmap-2.0.2.tgz#bf72615d649dfe5f699079c54b83e47d1ae19cfd" + integrity sha512-K5pXYOm9wqY1RgjpL3YTkF39tni1XajUIkawTLUo9EZEVUFga5gSQJF8nNS7ZwJQ02y+1YCNYcMh+HIf1ZqE+w== + +is-weakset@^2.0.3: + version "2.0.3" + resolved "https://registry.yarnpkg.com/is-weakset/-/is-weakset-2.0.3.tgz#e801519df8c0c43e12ff2834eead84ec9e624007" + integrity sha512-LvIm3/KWzS9oRFHugab7d+M/GcBXuXX5xZkzPmN+NxihdQlZUQ4dWuSV1xR/sq6upL1TJEDrfBgRepHFdBtSNQ== + dependencies: + call-bind "^1.0.7" + get-intrinsic "^1.2.4" + +isarray@^2.0.5: + version "2.0.5" + resolved "https://registry.yarnpkg.com/isarray/-/isarray-2.0.5.tgz#8af1e4c1221244cc62459faf38940d4e644a5723" + integrity sha512-xHjhDr3cNBK0BzdUJSPXZntQUx/mwMS5Rw4A7lPJ90XGAO6ISP/ePDNuo0vhqOZU+UD5JoodwCAAoZQd3FeAKw== + isexe@^2.0.0: version "2.0.0" resolved "https://registry.yarnpkg.com/isexe/-/isexe-2.0.0.tgz#e8fbf374dc556ff8947a10dcb0572d633f2cfa10" @@ -738,6 +1495,11 @@ joi@^17.6.0: "@sideway/formula" "^3.0.1" "@sideway/pinpoint" "^2.0.0" +js-tokens@^4.0.0: + version "4.0.0" + resolved "https://registry.yarnpkg.com/js-tokens/-/js-tokens-4.0.0.tgz#19203fb59991df98e3a287050d4647cdeaf32499" + integrity sha512-RdJUflcE3cUzKiMqQgsCu06FPu9UdIJO0beYbPhHN4k6apgJtifcoCtT9bcxOpYBtpD2kCM6Sbzg4CausW/PKQ== + jsbn@~0.1.0: version "0.1.1" resolved "https://registry.yarnpkg.com/jsbn/-/jsbn-0.1.1.tgz#a5e654c2e5a2deb5f201d96cefbca80c0ef2f513" @@ -826,43 +1588,111 @@ lru-cache@^6.0.0: dependencies: yallist "^4.0.0" +lz-string@^1.5.0: + version "1.5.0" + resolved "https://registry.yarnpkg.com/lz-string/-/lz-string-1.5.0.tgz#c1ab50f77887b712621201ba9fd4e3a6ed099941" + integrity 
sha512-h5bgJWpxJNswbU7qCrV0tIKQCaS3blPDrqKWx+QxzuzL1zGUzij9XCWLrSLsJPu5t+eWA/ycetzYAO5IOMcWAQ== + +make-error@^1.1.1: + version "1.3.6" + resolved "https://registry.yarnpkg.com/make-error/-/make-error-1.3.6.tgz#2eb2e37ea9b67c4891f684a1394799af484cf7a2" + integrity sha512-s8UhlNe7vPKomQhC1qFelMokr/Sc3AgNbso3n74mVPA5LTZwkB9NlXf4XPamLxJE8h0gh73rM94xvwRT2CVInw== + +media-typer@0.3.0: + version "0.3.0" + resolved "https://registry.yarnpkg.com/media-typer/-/media-typer-0.3.0.tgz#8710d7af0aa626f8fffa1ce00168545263255748" + integrity sha512-dq+qelQ9akHpcOl/gUVRTxVIOkAJ1wR3QAvb4RsVjS8oVoFjDGTc679wJYmUmknUF5HwMLOgb5O+a3KxfWapPQ== + +merge-descriptors@1.0.1: + version "1.0.1" + resolved "https://registry.yarnpkg.com/merge-descriptors/-/merge-descriptors-1.0.1.tgz#b00aaa556dd8b44568150ec9d1b953f3f90cbb61" + integrity sha512-cCi6g3/Zr1iqQi6ySbseM1Xvooa98N0w31jzUYrXPX2xqObmFGHJ0tQ5u74H3mVh7wLouTseZyYIq39g8cNp1w== + merge-stream@^2.0.0: version "2.0.0" resolved "https://registry.yarnpkg.com/merge-stream/-/merge-stream-2.0.0.tgz#52823629a14dd00c9770fb6ad47dc6310f2c1f60" integrity sha512-abv/qOcuPfk3URPfDzmZU1LKmuw8kT+0nIHvKrKgFrwifol/doWcdA4ZqsWQ8ENrFKkd67Mfpo/LovbIUsbt3w== +methods@~1.1.2: + version "1.1.2" + resolved "https://registry.yarnpkg.com/methods/-/methods-1.1.2.tgz#5529a4d67654134edcc5266656835b0f851afcee" + integrity sha512-iclAHeNqNm68zFtnZ0e+1L2yUIdvzNoauKU4WBA3VvH/vPFieF7qfRlwUZU+DA9P9bPXIS90ulxoUoCH23sV2w== + mime-db@1.52.0: version "1.52.0" resolved "https://registry.yarnpkg.com/mime-db/-/mime-db-1.52.0.tgz#bbabcdc02859f4987301c856e3387ce5ec43bf70" integrity sha512-sPU4uV7dYlvtWJxwwxHD0PuihVNiE7TyAbQ5SWxDCB9mUYvOgroQOwYQQOKPJ8CIbE+1ETVlOoK1UC2nU3gYvg== -mime-types@^2.1.12, mime-types@~2.1.19: +mime-types@^2.1.12, mime-types@~2.1.19, mime-types@~2.1.24, mime-types@~2.1.34: version "2.1.35" resolved "https://registry.yarnpkg.com/mime-types/-/mime-types-2.1.35.tgz#381a871b62a734450660ae3deee44813f70d959a" integrity sha512-ZDY+bPm5zTTF+YpCrAU9nK0UgICYPT0QtT1NZWFv4s++TNkcgVaT0g6+4R2uI4MjQjzysHB1zxuWL50hzaeXiw== dependencies: mime-db "1.52.0" +mime@1.6.0: + version "1.6.0" + resolved "https://registry.yarnpkg.com/mime/-/mime-1.6.0.tgz#32cd9e5c64553bd58d19a568af452acff04981b1" + integrity sha512-x0Vn8spI+wuJ1O6S7gnbaQg8Pxh4NNHb7KSINmEWKiPE4RKOplvijn+NkmYmmRgP68mc70j2EbeTFRsrswaQeg== + mimic-fn@^2.1.0: version "2.1.0" resolved "https://registry.yarnpkg.com/mimic-fn/-/mimic-fn-2.1.0.tgz#7ed2c2ccccaf84d3ffcb7a69b57711fc2083401b" integrity sha512-OqbOk5oEQeAZ8WXWydlu9HJjz9WVdEIvamMCcXmuqUYjTknH/sqsWvhQ3vgwKFRR1HpjvNBKQ37nbJgYzGqGcg== +minimatch@^3.1.2: + version "3.1.2" + resolved "https://registry.yarnpkg.com/minimatch/-/minimatch-3.1.2.tgz#19cd194bfd3e428f049a70817c038d89ab4be35b" + integrity sha512-J7p63hRiAjw1NDEww1W7i37+ByIrOWO5XQQAzZ3VOcL0PNybwpfmV/N05zFAzwQ9USyEcX6t3UO+K5aqBQOIHw== + dependencies: + brace-expansion "^1.1.7" + minimist@^1.2.5, minimist@^1.2.6: version "1.2.8" resolved "https://registry.yarnpkg.com/minimist/-/minimist-1.2.8.tgz#c1a464e7693302e082a075cee0c057741ac4772c" integrity sha512-2yyAR8qBkN3YuheJanUpWC5U3bb5osDywNB8RzDVlDwDHbocAJveqqj1u8+SVD7jkWT4yvsHCpWqqWqAxb0zCA== +ms@2.0.0: + version "2.0.0" + resolved "https://registry.yarnpkg.com/ms/-/ms-2.0.0.tgz#5608aeadfc00be6c2901df5f9861788de0d597c8" + integrity sha512-Tpp60P6IUJDTuOq/5Z8cdskzJujfwqfOTkrwIwj7IRISpnkJnT6SyJ4PCPnGMoFjC9ddhal5KVIYtAt97ix05A== + ms@2.1.2: version "2.1.2" resolved "https://registry.yarnpkg.com/ms/-/ms-2.1.2.tgz#d09d1f357b443f493382a8eb3ccd183872ae6009" integrity 
sha512-sGkPx+VjMtmA6MX27oA4FBFELFCZZ4S4XqeGOXCv68tT+jb3vk/RyaKWP0PTKyWtmLSM0b+adUTEvbs1PEaH2w== -ms@^2.1.1: +ms@2.1.3, ms@^2.1.1: version "2.1.3" resolved "https://registry.yarnpkg.com/ms/-/ms-2.1.3.tgz#574c8138ce1d2b5861f0b44579dbadd60c6615b2" integrity sha512-6FlzubTLZG3J2a/NVCAleEhjzq5oxgHyaCU9yYXvcLsvoVaHJq/s5xXI6/XXP6tz7R9xAOtHnSO/tXtF3WRTlA== +negotiator@0.6.3: + version "0.6.3" + resolved "https://registry.yarnpkg.com/negotiator/-/negotiator-0.6.3.tgz#58e323a72fedc0d6f9cd4d31fe49f51479590ccd" + integrity sha512-+EUsqGPLsM+j/zdChZjsnX51g4XrHFOIXwfnCVPGlQk/k5giakcKsuxCObBRu6DSm9opw/O6slWbJdghQM4bBg== + +nodemon@^3.1.4: + version "3.1.4" + resolved "https://registry.yarnpkg.com/nodemon/-/nodemon-3.1.4.tgz#c34dcd8eb46a05723ccde60cbdd25addcc8725e4" + integrity sha512-wjPBbFhtpJwmIeY2yP7QF+UKzPfltVGtfce1g/bB15/8vCGZj8uxD62b/b9M9/WVgme0NZudpownKN+c0plXlQ== + dependencies: + chokidar "^3.5.2" + debug "^4" + ignore-by-default "^1.0.1" + minimatch "^3.1.2" + pstree.remy "^1.1.8" + semver "^7.5.3" + simple-update-notifier "^2.0.0" + supports-color "^5.5.0" + touch "^3.1.0" + undefsafe "^2.0.5" + +normalize-path@^3.0.0, normalize-path@~3.0.0: + version "3.0.0" + resolved "https://registry.yarnpkg.com/normalize-path/-/normalize-path-3.0.0.tgz#0dcd69ff23a1c9b11fd0978316644a0388216a65" + integrity sha512-6eZs5Ls3WtCisHWp9S2GUy8dqkpGi4BVSz3GaqiE6ezub0512ESztXUwUB6C6IKbQkY2Pnb/mD4WYojCRwcwLA== + npm-run-path@^4.0.0: version "4.0.1" resolved "https://registry.yarnpkg.com/npm-run-path/-/npm-run-path-4.0.1.tgz#b7ecd1e5ed53da8e37a55e1c2269e0b97ed748ea" @@ -875,6 +1705,36 @@ object-inspect@^1.13.1: resolved "https://registry.yarnpkg.com/object-inspect/-/object-inspect-1.13.1.tgz#b96c6109324ccfef6b12216a956ca4dc2ff94bc2" integrity sha512-5qoj1RUiKOMsCCNLV1CBiPYE10sziTsnmNxkAI/rZhiD63CF7IqdFGC/XzjWjpSgLf0LxXX3bDFIh0E18f6UhQ== +object-is@^1.1.5: + version "1.1.6" + resolved "https://registry.yarnpkg.com/object-is/-/object-is-1.1.6.tgz#1a6a53aed2dd8f7e6775ff870bea58545956ab07" + integrity sha512-F8cZ+KfGlSGi09lJT7/Nd6KJZ9ygtvYC0/UYYLI9nmQKLMnydpB9yvbv9K1uSkEu7FU9vYPmVwLg328tX+ot3Q== + dependencies: + call-bind "^1.0.7" + define-properties "^1.2.1" + +object-keys@^1.1.1: + version "1.1.1" + resolved "https://registry.yarnpkg.com/object-keys/-/object-keys-1.1.1.tgz#1c47f272df277f3b1daf061677d9c82e2322c60e" + integrity sha512-NuAESUOUMrlIXOfHKzD6bpPu3tYt3xvjNdRIQ+FeT0lNb4K8WR70CaDxhuNguS2XG+GjkyMwOzsN5ZktImfhLA== + +object.assign@^4.1.4: + version "4.1.5" + resolved "https://registry.yarnpkg.com/object.assign/-/object.assign-4.1.5.tgz#3a833f9ab7fdb80fc9e8d2300c803d216d8fdbb0" + integrity sha512-byy+U7gp+FVwmyzKPYhW2h5l3crpmGsxl7X2s8y43IgxvG4g3QZ6CffDtsNQy1WsmZpQbO+ybo0AlW7TY6DcBQ== + dependencies: + call-bind "^1.0.5" + define-properties "^1.2.1" + has-symbols "^1.0.3" + object-keys "^1.1.1" + +on-finished@2.4.1: + version "2.4.1" + resolved "https://registry.yarnpkg.com/on-finished/-/on-finished-2.4.1.tgz#58c8c44116e54845ad57f14ab10b03533184ac3f" + integrity sha512-oVlzkg3ENAhCk2zdv7IJwd/QUD4z2RxRwpkcGY8psCVcCYZNq4wYnVWALHM+brtuJjePWiYF/ClmuDr8Ch5+kg== + dependencies: + ee-first "1.1.1" + once@^1.3.1, once@^1.4.0: version "1.4.0" resolved "https://registry.yarnpkg.com/once/-/once-1.4.0.tgz#583b1aa775961d4b113ac17d9c50baef9dd76bd1" @@ -901,11 +1761,21 @@ p-map@^4.0.0: dependencies: aggregate-error "^3.0.0" +parseurl@~1.3.3: + version "1.3.3" + resolved "https://registry.yarnpkg.com/parseurl/-/parseurl-1.3.3.tgz#9da19e7bee8d12dff0513ed5b76957793bc2e8d4" + integrity 
sha512-CiyeOxFT/JZyN5m0z9PfXw4SCBJ6Sygz1Dpl0wqjlhDEGGBP1GnsUVEL0p63hoG1fcj3fHynXi9NYO4nWOL+qQ== + path-key@^3.0.0, path-key@^3.1.0: version "3.1.1" resolved "https://registry.yarnpkg.com/path-key/-/path-key-3.1.1.tgz#581f6ade658cbba65a0d3380de7753295054f375" integrity sha512-ojmeN0qd+y0jszEtoY48r0Peq5dwMEkIlCOu6Q5f41lfkswXuKtYrhgoTpLnyIcHm24Uhqx+5Tqm2InSwLhE6Q== +path-to-regexp@0.1.7: + version "0.1.7" + resolved "https://registry.yarnpkg.com/path-to-regexp/-/path-to-regexp-0.1.7.tgz#df604178005f522f15eb4490e7247a1bfaa67f8c" + integrity sha512-5DFkuoqlv1uYQKxy8omFBeJPQcdoE07Kv2sferDCrAq1ohOU+MSDswDIbnx3YAM60qIOnYa53wBhXW0EbMonrQ== + pend@~1.2.0: version "1.2.0" resolved "https://registry.yarnpkg.com/pend/-/pend-1.2.0.tgz#7a57eb550a6783f9115331fcf4663d5c8e007a50" @@ -916,16 +1786,48 @@ performance-now@^2.1.0: resolved "https://registry.yarnpkg.com/performance-now/-/performance-now-2.1.0.tgz#6309f4e0e5fa913ec1c69307ae364b4b377c9e7b" integrity sha512-7EAHlyLHI56VEIdK57uwHdHKIaAGbnXPiw0yWbarQZOKaKpvUIgW0jWRVLiatnM+XXlSwsanIBH/hzGMJulMow== +picocolors@^1.0.0: + version "1.0.1" + resolved "https://registry.yarnpkg.com/picocolors/-/picocolors-1.0.1.tgz#a8ad579b571952f0e5d25892de5445bcfe25aaa1" + integrity sha512-anP1Z8qwhkbmu7MFP5iTt+wQKXgwzf7zTyGlcdzabySa9vd0Xt392U0rVmz9poOaBj0uHJKyyo9/upk0HrEQew== + +picomatch@^2.0.4, picomatch@^2.2.1: + version "2.3.1" + resolved "https://registry.yarnpkg.com/picomatch/-/picomatch-2.3.1.tgz#3ba3833733646d9d3e4995946c1365a67fb07a42" + integrity sha512-JU3teHTNjmE2VCGFzuY8EXzCDVwEqB2a8fsIvwaStHhAWJEeVd1o1QD80CU6+ZdEXXSLbSsuLwJjkCBWqRQUVA== + pify@^2.2.0: version "2.3.0" resolved "https://registry.yarnpkg.com/pify/-/pify-2.3.0.tgz#ed141a6ac043a849ea588498e7dca8b15330e90c" integrity sha512-udgsAY+fTnvv7kI7aaxbqwWNb0AHiB0qBO89PZKPkoTmGOgdbrHDKD+0B2X4uTfJ/FT1R09r9gTsjUjNJotuog== +possible-typed-array-names@^1.0.0: + version "1.0.0" + resolved "https://registry.yarnpkg.com/possible-typed-array-names/-/possible-typed-array-names-1.0.0.tgz#89bb63c6fada2c3e90adc4a647beeeb39cc7bf8f" + integrity sha512-d7Uw+eZoloe0EHDIYoe+bQ5WXnGMOpmiZFTuMWCwpjzzkL2nTjcKiAk4hh8TjnGye2TwWOk3UXucZ+3rbmBa8Q== + pretty-bytes@^5.6.0: version "5.6.0" resolved "https://registry.yarnpkg.com/pretty-bytes/-/pretty-bytes-5.6.0.tgz#356256f643804773c82f64723fe78c92c62beaeb" integrity sha512-FFw039TmrBqFK8ma/7OL3sDz/VytdtJr044/QUJtH0wK9lb9jLq9tJyIxUwtQJHwar2BqtiA4iCWSwo9JLkzFg== +pretty-format@^27.0.2: + version "27.5.1" + resolved "https://registry.yarnpkg.com/pretty-format/-/pretty-format-27.5.1.tgz#2181879fdea51a7a5851fb39d920faa63f01d88e" + integrity sha512-Qb1gy5OrP5+zDf2Bvnzdl3jsTf1qXVMazbvCoKhtKqVs4/YK4ozX4gKQJJVyNe+cajNPn0KoC0MC3FUmaHWEmQ== + dependencies: + ansi-regex "^5.0.1" + ansi-styles "^5.0.0" + react-is "^17.0.1" + +proxy-addr@~2.0.7: + version "2.0.7" + resolved "https://registry.yarnpkg.com/proxy-addr/-/proxy-addr-2.0.7.tgz#f19fe69ceab311eeb94b42e70e8c2070f9ba1025" + integrity sha512-llQsMLSUDUPT44jdrU/O37qlnifitDP+ZwrmmZcoSKyLKvtZxpyV0n2/bD/N4tBAAZ/gJEdZU7KMraoK1+XYAg== + dependencies: + forwarded "0.2.0" + ipaddr.js "1.9.1" + proxy-from-env@1.0.0: version "1.0.0" resolved "https://registry.yarnpkg.com/proxy-from-env/-/proxy-from-env-1.0.0.tgz#33c50398f70ea7eb96d21f7b817630a55791c7ee" @@ -936,6 +1838,11 @@ psl@^1.1.33: resolved "https://registry.yarnpkg.com/psl/-/psl-1.9.0.tgz#d0df2a137f00794565fcaf3b2c00cd09f8d5a5a7" integrity sha512-E/ZsdU4HLs/68gYzgGTkMicWTLPdAftJLfJFlLUAAKZGkStNU72sZjT66SnMDVOfOWY/YAoiD7Jxa9iHvngcag== +pstree.remy@^1.1.8: + version "1.1.8" + resolved 
"https://registry.yarnpkg.com/pstree.remy/-/pstree.remy-1.1.8.tgz#c242224f4a67c21f686839bbdb4ac282b8373d3a" + integrity sha512-77DZwxQmxKnu3aR542U+X8FypNzbfJ+C5XQDk3uWjWxn6151aIMGthWYRXTqT1E5oJvg+ljaa2OJi+VfvCOQ8w== + pump@^3.0.0: version "3.0.0" resolved "https://registry.yarnpkg.com/pump/-/pump-3.0.0.tgz#b4a2116815bde2f4e1ea602354e8c75565107a64" @@ -949,6 +1856,13 @@ punycode@^2.1.1: resolved "https://registry.yarnpkg.com/punycode/-/punycode-2.3.1.tgz#027422e2faec0b25e1549c3e1bd8309b9133b6e5" integrity sha512-vYt7UD1U9Wg6138shLtLOvdAu+8DsC/ilFtEVHcH+wydcSpNE20AfSOduf6MkRFahL5FY7X1oU7nKVZFtfq8Fg== +qs@6.11.0: + version "6.11.0" + resolved "https://registry.yarnpkg.com/qs/-/qs-6.11.0.tgz#fd0d963446f7a65e1367e01abd85429453f0c37a" + integrity sha512-MvjoMCJwEarSbUYk5O+nmoSzSutSsTwF85zcHPQ9OrlFoZOYIjaqBAJIqIXjptyD5vThxGq52Xu/MaJzRkIk4Q== + dependencies: + side-channel "^1.0.4" + qs@~6.10.3: version "6.10.5" resolved "https://registry.yarnpkg.com/qs/-/qs-6.10.5.tgz#974715920a80ff6a262264acd2c7e6c2a53282b4" @@ -961,6 +1875,48 @@ querystringify@^2.1.1: resolved "https://registry.yarnpkg.com/querystringify/-/querystringify-2.2.0.tgz#3345941b4153cb9d082d8eee4cda2016a9aef7f6" integrity sha512-FIqgj2EUvTa7R50u0rGsyTftzjYmv/a3hO345bZNrqabNqjtgiDMgmo4mkUjd+nzU5oF3dClKqFIPUKybUyqoQ== +range-parser@~1.2.1: + version "1.2.1" + resolved "https://registry.yarnpkg.com/range-parser/-/range-parser-1.2.1.tgz#3cf37023d199e1c24d1a55b84800c2f3e6468031" + integrity sha512-Hrgsx+orqoygnmhFbKaHE6c296J+HTAQXoxEF6gNupROmmGJRoyzfG3ccAveqCBrwr/2yxQ5BVd/GTl5agOwSg== + +raw-body@2.5.2: + version "2.5.2" + resolved "https://registry.yarnpkg.com/raw-body/-/raw-body-2.5.2.tgz#99febd83b90e08975087e8f1f9419a149366b68a" + integrity sha512-8zGqypfENjCIqGhgXToC8aB2r7YrBX+AQAfIPs/Mlk+BtPTztOvTS01NRW/3Eh60J+a48lt8qsCzirQ6loCVfA== + dependencies: + bytes "3.1.2" + http-errors "2.0.0" + iconv-lite "0.4.24" + unpipe "1.0.0" + +react-is@^17.0.1: + version "17.0.2" + resolved "https://registry.yarnpkg.com/react-is/-/react-is-17.0.2.tgz#e691d4a8e9c789365655539ab372762b0efb54f0" + integrity sha512-w2GsyukL62IJnlaff/nRegPQR94C/XXamvMWmSHRJ4y7Ts/4ocGRmTHvOs8PSE6pB3dWOrD/nueuU5sduBsQ4w== + +readdirp@~3.6.0: + version "3.6.0" + resolved "https://registry.yarnpkg.com/readdirp/-/readdirp-3.6.0.tgz#74a370bd857116e245b29cc97340cd431a02a6c7" + integrity sha512-hOS089on8RduqdbhvQ5Z37A0ESjsqz6qnRcffsMU3495FuTdqSm+7bhJ29JvIOsBDEEnan5DPu9t3To9VRlMzA== + dependencies: + picomatch "^2.2.1" + +regenerator-runtime@^0.14.0: + version "0.14.1" + resolved "https://registry.yarnpkg.com/regenerator-runtime/-/regenerator-runtime-0.14.1.tgz#356ade10263f685dda125100cd862c1db895327f" + integrity sha512-dYnhHh0nJoMfnkZs6GmmhFknAGRrLznOu5nc9ML+EJxGvrx6H7teuevqVqCuPcPK//3eDrrjQhehXVx9cnkGdw== + +regexp.prototype.flags@^1.5.1: + version "1.5.2" + resolved "https://registry.yarnpkg.com/regexp.prototype.flags/-/regexp.prototype.flags-1.5.2.tgz#138f644a3350f981a858c44f6bb1a61ff59be334" + integrity sha512-NcDiDkTLuPR+++OCKB0nWafEmhg/Da8aUPLPMQbK+bxKKCm1/S5he+AqYa4PlMCVBalb4/yxIRub6qkEx5yJbw== + dependencies: + call-bind "^1.0.6" + define-properties "^1.2.1" + es-errors "^1.3.0" + set-function-name "^2.0.1" + request-progress@^3.0.0: version "3.0.0" resolved "https://registry.yarnpkg.com/request-progress/-/request-progress-3.0.0.tgz#4ca754081c7fec63f505e4faa825aa06cd669dbe" @@ -993,12 +1949,12 @@ rxjs@^7.5.1, rxjs@^7.5.4: dependencies: tslib "^2.1.0" -safe-buffer@^5.0.1, safe-buffer@^5.1.2: +safe-buffer@5.2.1, safe-buffer@^5.0.1, safe-buffer@^5.1.2: version "5.2.1" 
resolved "https://registry.yarnpkg.com/safe-buffer/-/safe-buffer-5.2.1.tgz#1eaf9fa9bdb1fdd4ec75f58f9cdb4e6b7827eec6" integrity sha512-rp3So07KcdmmKbGvgaNxQSJr7bGVSVk5S9Eq1F+ppbRo70+YeaDxkw5Dd8NPN+GD6bjnYm2VuPuCXmpuYvmCXQ== -safer-buffer@^2.0.2, safer-buffer@^2.1.0, safer-buffer@~2.1.0: +"safer-buffer@>= 2.1.2 < 3", safer-buffer@^2.0.2, safer-buffer@^2.1.0, safer-buffer@~2.1.0: version "2.1.2" resolved "https://registry.yarnpkg.com/safer-buffer/-/safer-buffer-2.1.2.tgz#44fa161b0187b9549dd84bb91802f9bd8385cd6a" integrity sha512-YZo3K82SD7Riyi0E1EQPojLz7kpepnSQI9IyPbHHg1XXXevb5dJI7tpyN2ADxGcQbHG7vcyRHk0cbwqcQriUtg== @@ -1010,6 +1966,40 @@ semver@^7.3.2: dependencies: lru-cache "^6.0.0" +semver@^7.5.3: + version "7.6.2" + resolved "https://registry.yarnpkg.com/semver/-/semver-7.6.2.tgz#1e3b34759f896e8f14d6134732ce798aeb0c6e13" + integrity sha512-FNAIBWCx9qcRhoHcgcJ0gvU7SN1lYU2ZXuSfl04bSC5OpvDHFyJCjdNHomPXxjQlCBU67YW64PzY7/VIEH7F2w== + +send@0.18.0: + version "0.18.0" + resolved "https://registry.yarnpkg.com/send/-/send-0.18.0.tgz#670167cc654b05f5aa4a767f9113bb371bc706be" + integrity sha512-qqWzuOjSFOuqPjFe4NOsMLafToQQwBSOEpS+FwEt3A2V3vKubTquT3vmLTQpFgMXp8AlFWFuP1qKaJZOtPpVXg== + dependencies: + debug "2.6.9" + depd "2.0.0" + destroy "1.2.0" + encodeurl "~1.0.2" + escape-html "~1.0.3" + etag "~1.8.1" + fresh "0.5.2" + http-errors "2.0.0" + mime "1.6.0" + ms "2.1.3" + on-finished "2.4.1" + range-parser "~1.2.1" + statuses "2.0.1" + +serve-static@1.15.0: + version "1.15.0" + resolved "https://registry.yarnpkg.com/serve-static/-/serve-static-1.15.0.tgz#faaef08cffe0a1a62f60cad0c4e513cff0ac9540" + integrity sha512-XGuRDNjXUijsUL0vl6nSD7cwURuzEgglbOaFuZM9g3kwDXOWVTck0jLzjPzGD+TazWbboZYu52/9/XPdUgne9g== + dependencies: + encodeurl "~1.0.2" + escape-html "~1.0.3" + parseurl "~1.3.3" + send "0.18.0" + set-function-length@^1.2.1: version "1.2.2" resolved "https://registry.yarnpkg.com/set-function-length/-/set-function-length-1.2.2.tgz#aac72314198eaed975cf77b2c3b6b880695e5449" @@ -1022,6 +2012,21 @@ set-function-length@^1.2.1: gopd "^1.0.1" has-property-descriptors "^1.0.2" +set-function-name@^2.0.1: + version "2.0.2" + resolved "https://registry.yarnpkg.com/set-function-name/-/set-function-name-2.0.2.tgz#16a705c5a0dc2f5e638ca96d8a8cd4e1c2b90985" + integrity sha512-7PGFlmtwsEADb0WYyvCMa1t+yke6daIG4Wirafur5kcf+MhUnPms1UeR0CKQdTZD81yESwMHbtn+TR+dMviakQ== + dependencies: + define-data-property "^1.1.4" + es-errors "^1.3.0" + functions-have-names "^1.2.3" + has-property-descriptors "^1.0.2" + +setprototypeof@1.2.0: + version "1.2.0" + resolved "https://registry.yarnpkg.com/setprototypeof/-/setprototypeof-1.2.0.tgz#66c9a24a73f9fc28cbe66b09fed3d33dcaf1b424" + integrity sha512-E5LDX7Wrp85Kil5bhZv46j8jOeboKq5JMmYM3gVGdGH8xFpPWXUMsNrlODCrkoxMEeNi/XZIwuRvY4XNwYMJpw== + shebang-command@^2.0.0: version "2.0.0" resolved "https://registry.yarnpkg.com/shebang-command/-/shebang-command-2.0.0.tgz#ccd0af4f8835fbdc265b82461aaf0c36663f34ea" @@ -1049,6 +2054,13 @@ signal-exit@^3.0.2: resolved "https://registry.yarnpkg.com/signal-exit/-/signal-exit-3.0.7.tgz#a9a1767f8af84155114eaabd73f99273c8f59ad9" integrity sha512-wnD2ZE+l+SPC/uoS0vXeE9L1+0wuaMqKlfz9AMUo38JsyLSBWSFcHR1Rri62LZc12vLr1gb3jl7iwQhgwpAbGQ== +simple-update-notifier@^2.0.0: + version "2.0.0" + resolved "https://registry.yarnpkg.com/simple-update-notifier/-/simple-update-notifier-2.0.0.tgz#d70b92bdab7d6d90dfd73931195a30b6e3d7cebb" + integrity sha512-a2B9Y0KlNXl9u/vsW6sTIu9vGEpfKu2wRV6l1H3XEas/0gUIzGzBoP/IouTcUQbm9JWZLH3COxyn03TYlFax6w== + dependencies: + semver 
"^7.5.3" + slice-ansi@^3.0.0: version "3.0.0" resolved "https://registry.yarnpkg.com/slice-ansi/-/slice-ansi-3.0.0.tgz#31ddc10930a1b7e0b67b08c96c2f49b77a789787" @@ -1082,6 +2094,18 @@ sshpk@^1.14.1: safer-buffer "^2.0.2" tweetnacl "~0.14.0" +statuses@2.0.1: + version "2.0.1" + resolved "https://registry.yarnpkg.com/statuses/-/statuses-2.0.1.tgz#55cb000ccf1d48728bd23c685a063998cf1a1b63" + integrity sha512-RwNA9Z/7PrK06rYLIzFMlaF+l73iwpzsqRIFgbMLbTcLD6cOao82TaWefPXQvB2fOC4AjuYSEndS7N/mTCbkdQ== + +stop-iteration-iterator@^1.0.0: + version "1.0.0" + resolved "https://registry.yarnpkg.com/stop-iteration-iterator/-/stop-iteration-iterator-1.0.0.tgz#6a60be0b4ee757d1ed5254858ec66b10c49285e4" + integrity sha512-iCGQj+0l0HOdZ2AEeBADlsRC+vsnDsZsbdSiH1yNSjcfKM7fdpCMfqAL/dwF5BLiw/XhRft/Wax6zQbhq2BcjQ== + dependencies: + internal-slot "^1.0.4" + string-width@^4.1.0, string-width@^4.2.0: version "4.2.3" resolved "https://registry.yarnpkg.com/string-width/-/string-width-4.2.3.tgz#269c7117d27b05ad2e536830a8ec895ef9c6d010" @@ -1103,6 +2127,13 @@ strip-final-newline@^2.0.0: resolved "https://registry.yarnpkg.com/strip-final-newline/-/strip-final-newline-2.0.0.tgz#89b852fb2fcbe936f6f4b3187afb0a12c1ab58ad" integrity sha512-BrpvfNAE3dcvq7ll3xVumzjKjZQ5tI1sEUIKr3Uoks0XUl45St3FlatVqef9prk4jRDzhW6WZg+3bk93y6pLjA== +supports-color@^5.3.0, supports-color@^5.5.0: + version "5.5.0" + resolved "https://registry.yarnpkg.com/supports-color/-/supports-color-5.5.0.tgz#e2e69a44ac8772f78a1ec0b35b689df6530efc8f" + integrity sha512-QjVjwdXIt408MIiAqCX4oUKsgU2EqAGzs2Ppkm4aQYbjm+ZEWEcW4SfFNTr4uMNZma0ey4f5lgLrkB0aX0QMow== + dependencies: + has-flag "^3.0.0" + supports-color@^7.1.0: version "7.2.0" resolved "https://registry.yarnpkg.com/supports-color/-/supports-color-7.2.0.tgz#1b7dcdcb32b8138801b3e478ba6a51caa89648da" @@ -1132,6 +2163,23 @@ tmp@~0.2.1: resolved "https://registry.yarnpkg.com/tmp/-/tmp-0.2.3.tgz#eb783cc22bc1e8bebd0671476d46ea4eb32a79ae" integrity sha512-nZD7m9iCPC5g0pYmcaxogYKggSfLsdxl8of3Q/oIbqCqLLIO9IAF0GWjX1z9NZRHPiXv8Wex4yDCaZsgEw0Y8w== +to-regex-range@^5.0.1: + version "5.0.1" + resolved "https://registry.yarnpkg.com/to-regex-range/-/to-regex-range-5.0.1.tgz#1648c44aae7c8d988a326018ed72f5b4dd0392e4" + integrity sha512-65P7iz6X5yEr1cwcgvQxbbIw7Uk3gOy5dIdtZ4rDveLqhrdJP+Li/Hx6tyK0NEb+2GCyneCMJiGqrADCSNk8sQ== + dependencies: + is-number "^7.0.0" + +toidentifier@1.0.1: + version "1.0.1" + resolved "https://registry.yarnpkg.com/toidentifier/-/toidentifier-1.0.1.tgz#3be34321a88a820ed1bd80dfaa33e479fbb8dd35" + integrity sha512-o5sSPKEkg/DIQNmH43V0/uerLrpzVedkUh8tGNvaeXpfpuwjKenlSox/2O/BTlZUtEe+JG7s5YhEz608PlAHRA== + +touch@^3.1.0: + version "3.1.1" + resolved "https://registry.yarnpkg.com/touch/-/touch-3.1.1.tgz#097a23d7b161476435e5c1344a95c0f75b4a5694" + integrity sha512-r0eojU4bI8MnHr8c5bNo7lJDdI2qXlWWJk6a9EAFG7vbhTjElYhBVS3/miuE0uOuoLdb8Mc/rVfsmm6eo5o9GA== + tough-cookie@^4.1.3: version "4.1.3" resolved "https://registry.yarnpkg.com/tough-cookie/-/tough-cookie-4.1.3.tgz#97b9adb0728b42280aa3d814b6b999b2ff0318bf" @@ -1142,6 +2190,25 @@ tough-cookie@^4.1.3: universalify "^0.2.0" url-parse "^1.5.3" +ts-node@^10.9.2: + version "10.9.2" + resolved "https://registry.yarnpkg.com/ts-node/-/ts-node-10.9.2.tgz#70f021c9e185bccdca820e26dc413805c101c71f" + integrity sha512-f0FFpIdcHgn8zcPSbf1dRevwt047YMnaiJM3u2w2RewrB+fob/zePZcrOyQoLMMO7aBIddLcQIEK5dYjkLnGrQ== + dependencies: + "@cspotcode/source-map-support" "^0.8.0" + "@tsconfig/node10" "^1.0.7" + "@tsconfig/node12" "^1.0.7" + "@tsconfig/node14" "^1.0.0" + "@tsconfig/node16" 
"^1.0.2" + acorn "^8.4.1" + acorn-walk "^8.1.1" + arg "^4.1.0" + create-require "^1.1.0" + diff "^4.0.1" + make-error "^1.1.1" + v8-compile-cache-lib "^3.0.1" + yn "3.1.1" + tslib@^2.1.0: version "2.6.2" resolved "https://registry.yarnpkg.com/tslib/-/tslib-2.6.2.tgz#703ac29425e7b37cd6fd456e92404d46d1f3e4ae" @@ -1164,6 +2231,24 @@ type-fest@^0.21.3: resolved "https://registry.yarnpkg.com/type-fest/-/type-fest-0.21.3.tgz#d260a24b0198436e133fa26a524a6d65fa3b2e37" integrity sha512-t0rzBq87m3fVcduHDUFhKmyyX+9eo6WQjZvf51Ea/M0Q7+T374Jp1aUiyUl0GKxp8M/OETVHSDvmkyPgvX+X2w== +type-is@~1.6.18: + version "1.6.18" + resolved "https://registry.yarnpkg.com/type-is/-/type-is-1.6.18.tgz#4e552cd05df09467dcbc4ef739de89f2cf37c131" + integrity sha512-TkRKr9sUTxEH8MdfuCSP7VizJyzRNMjj2J2do2Jr3Kym598JVdEksuzPQCnlFPW4ky9Q+iA+ma9BGm06XQBy8g== + dependencies: + media-typer "0.3.0" + mime-types "~2.1.24" + +typescript@^4.8.4: + version "4.9.5" + resolved "https://registry.yarnpkg.com/typescript/-/typescript-4.9.5.tgz#095979f9bcc0d09da324d58d03ce8f8374cbe65a" + integrity sha512-1FXk9E2Hm+QzZQ7z+McJiHL4NW1F2EzMu9Nq9i3zAaGqibafqYwCVU6WyWAuyQRRzOlxou8xZSyXLEN8oKj24g== + +undefsafe@^2.0.5: + version "2.0.5" + resolved "https://registry.yarnpkg.com/undefsafe/-/undefsafe-2.0.5.tgz#38733b9327bdcd226db889fb723a6efd162e6e2c" + integrity sha512-WxONCrssBM8TSPRqN5EmsjVrsv4A8X12J4ArBiiayv3DyyG3ZlIg6yysuuSYdZsVz3TKcTg2fd//Ujd4CHV1iA== + undici-types@~5.26.4: version "5.26.5" resolved "https://registry.yarnpkg.com/undici-types/-/undici-types-5.26.5.tgz#bcd539893d00b56e964fd2657a4866b221a65617" @@ -1179,6 +2264,11 @@ universalify@^2.0.0: resolved "https://registry.yarnpkg.com/universalify/-/universalify-2.0.1.tgz#168efc2180964e6386d061e094df61afe239b18d" integrity sha512-gptHNQghINnc/vTGIk0SOFGFNXw7JVrlRUtConJRlvaw6DuX0wO5Jeko9sWrMBhh+PsYAZ7oXAiOnf/UKogyiw== +unpipe@1.0.0, unpipe@~1.0.0: + version "1.0.0" + resolved "https://registry.yarnpkg.com/unpipe/-/unpipe-1.0.0.tgz#b2bf4ee8514aae6165b4817829d21b2ef49904ec" + integrity sha512-pjy2bYhSsufwWlKwPc+l3cN7+wuJlK6uz0YdJEOlQDbl6jo/YlPi4mb8agUkVC8BF7V8NuzeyPNqRksA3hztKQ== + untildify@^4.0.0: version "4.0.0" resolved "https://registry.yarnpkg.com/untildify/-/untildify-4.0.0.tgz#2bc947b953652487e4600949fb091e3ae8cd919b" @@ -1192,11 +2282,26 @@ url-parse@^1.5.3: querystringify "^2.1.1" requires-port "^1.0.0" +utils-merge@1.0.1: + version "1.0.1" + resolved "https://registry.yarnpkg.com/utils-merge/-/utils-merge-1.0.1.tgz#9f95710f50a267947b2ccc124741c1028427e713" + integrity sha512-pMZTvIkT1d+TFGvDOqodOclx0QWkkgi6Tdoa8gC8ffGAAqz9pzPTZWAybbsHHoED/ztMtkv/VoYTYyShUn81hA== + uuid@^8.3.2: version "8.3.2" resolved "https://registry.yarnpkg.com/uuid/-/uuid-8.3.2.tgz#80d5b5ced271bb9af6c445f21a1a04c606cefbe2" integrity sha512-+NYs2QeMWy+GWFOEm9xnn6HCDp0l7QBD7ml8zLUmJ+93Q5NF0NocErnwkTkXVFNiX3/fpC6afS8Dhb/gz7R7eg== +v8-compile-cache-lib@^3.0.1: + version "3.0.1" + resolved "https://registry.yarnpkg.com/v8-compile-cache-lib/-/v8-compile-cache-lib-3.0.1.tgz#6336e8d71965cb3d35a1bbb7868445a7c05264bf" + integrity sha512-wa7YjyUGfNZngI/vtK0UHAN+lgDCxBPCylVXGp0zu59Fz5aiGtNXaq3DhIov063MorB+VfufLh3JlF2KdTK3xg== + +vary@~1.1.2: + version "1.1.2" + resolved "https://registry.yarnpkg.com/vary/-/vary-1.1.2.tgz#2299f02c6ded30d4a5961b0b9f74524a18f634fc" + integrity sha512-BNGbWLfd0eUPabhkXUVm0j8uuvREyTh5ovRa/dyow/BqAbZJyC+5fU+IzQOzmAKzYqYRAISoRhdQr3eIZ/PXqg== + verror@1.10.0: version "1.10.0" resolved "https://registry.yarnpkg.com/verror/-/verror-1.10.0.tgz#3a105ca17053af55d6e270c1f8288682e18da400" @@ -1217,6 
     minimist "^1.2.5"
     rxjs "^7.5.4"
 
+which-boxed-primitive@^1.0.2:
+  version "1.0.2"
+  resolved "https://registry.yarnpkg.com/which-boxed-primitive/-/which-boxed-primitive-1.0.2.tgz#13757bc89b209b049fe5d86430e21cf40a89a8e6"
+  integrity sha512-bwZdv0AKLpplFY2KZRX6TvyuN7ojjr7lwkg6ml0roIy9YeuSr7JS372qlNW18UQYzgYK9ziGcerWqZOmEn9VNg==
+  dependencies:
+    is-bigint "^1.0.1"
+    is-boolean-object "^1.1.0"
+    is-number-object "^1.0.4"
+    is-string "^1.0.5"
+    is-symbol "^1.0.3"
+
+which-collection@^1.0.1:
+  version "1.0.2"
+  resolved "https://registry.yarnpkg.com/which-collection/-/which-collection-1.0.2.tgz#627ef76243920a107e7ce8e96191debe4b16c2a0"
+  integrity sha512-K4jVyjnBdgvc86Y6BkaLZEN933SwYOuBFkdmBu9ZfkcAbdVbpITnDmjvZ/aQjRXQrv5EPkTnD1s39GiiqbngCw==
+  dependencies:
+    is-map "^2.0.3"
+    is-set "^2.0.3"
+    is-weakmap "^2.0.2"
+    is-weakset "^2.0.3"
+
+which-typed-array@^1.1.13:
+  version "1.1.15"
+  resolved "https://registry.yarnpkg.com/which-typed-array/-/which-typed-array-1.1.15.tgz#264859e9b11a649b388bfaaf4f767df1f779b38d"
+  integrity sha512-oV0jmFtUky6CXfkqehVvBP/LSWJ2sy4vWMioiENyJLePrBO/yKyV9OyJySfAKosh+RYkIl5zJCNZ8/4JncrpdA==
+  dependencies:
+    available-typed-arrays "^1.0.7"
+    call-bind "^1.0.7"
+    for-each "^0.3.3"
+    gopd "^1.0.1"
+    has-tostringtag "^1.0.2"
+
 which@^2.0.1:
   version "2.0.2"
   resolved "https://registry.yarnpkg.com/which/-/which-2.0.2.tgz#7c6a8dd0a636a0327e10b59c9286eee93f3f51b1"
@@ -1259,3 +2396,8 @@ yauzl@^2.10.0:
   dependencies:
     buffer-crc32 "~0.2.3"
     fd-slicer "~1.1.0"
+
+yn@3.1.1:
+  version "3.1.1"
+  resolved "https://registry.yarnpkg.com/yn/-/yn-3.1.1.tgz#1e87401a09d767c1d5eab26a6e4c185182d2eb50"
+  integrity sha512-Ux4ygGWsu2c7isFWe8Yu1YluJmqVhxqK2cLXNQA5AcC3QfbGNpM7fu0Y8b/z16pXLnFxZYvWhd3fhBY9DLmC6Q==
diff --git a/web/package.json b/web/package.json
index 38f2aa70d..779201390 100644
--- a/web/package.json
+++ b/web/package.json
@@ -15,15 +15,19 @@
   },
   "keywords": [],
   "scripts": {
+    "prepare": "cd .. && husky install web/.husky",
     "webpack": "NODE_ENV=development webpack serve --config config/webpack.dev.js",
     "webpack:cde": "NODE_ENV=development MODULE=cde webpack serve --config config/webpack.dev.js",
     "typed-scss": "typed-scss-modules src --watch",
     "dev": "run-p webpack typed-scss",
     "dev:cde": "MODULE=cde run-p webpack:cde typed-scss",
+    "dev:cypress": "NODE_ENV=development TARGET_LOCALHOST=false CYPRESS=true PROJECT_ID=CYPRESS_PROJECT_ID ORG_ID=CYPRESS_ORG_ID ACCOUNT_ID=CYPRESS_ACCOUNT_ID BASE_URL=http://localhost:8080 NODE_OPTIONS=\"--max-old-space-size=6144\" webpack serve --progress --config config/webpack.dev.js",
+    "dev:ar": "NODE_ENV=development webpack serve --config src/ar/config/webpack.dev.js",
     "test": "jest src --silent",
     "test:watch": "jest --watch",
     "build": "rm -rf dist && webpack --config config/webpack.prod.js",
     "build:cde": "rm -rf dist && MODULE=cde webpack --config config/webpack.prod.js",
+    "build:ar": "rm -rf dist && webpack --config src/ar/config/webpack.prod.js",
     "lint": "eslint --rulesdir ./scripts/eslint-rules --ext .ts --ext .tsx src",
     "prettier": "prettier --check \"./src/**/*.{ts,tsx,css,scss}\"",
     "coverage": "npm test --coverage",
@@ -34,8 +38,7 @@
     "services": "restful-react import --config restful-react.config.js",
     "postservices": "prettier --write src/services/**/*.tsx",
     "strings": "npm-run-all strings:*",
-    "strings:genTypes": "node scripts/strings/generateTypesCli.mjs",
-    "dev:cypress": "NODE_ENV=development TARGET_LOCALHOST=false CYPRESS=true PROJECT_ID=CYPRESS_PROJECT_ID ORG_ID=CYPRESS_ORG_ID ACCOUNT_ID=CYPRESS_ACCOUNT_ID BASE_URL=http://localhost:8080 NODE_OPTIONS=\"--max-old-space-size=6144\" webpack serve --progress --config config/webpack.dev.js"
+    "strings:genTypes": "node scripts/strings/generateTypesCli.mjs"
   },
   "dependencies": {
     "@blueprintjs/core": "3.26.1",
@@ -47,8 +50,10 @@
     "@codemirror/state": "^6.2.0",
     "@codemirror/view": "^6.9.6",
     "@harnessio/design-system": "^2.1.1",
-    "@harnessio/icons": "^2.1.5",
+    "@harnessio/icons": "^2.1.7",
+    "@harnessio/react-har-service-client": "^0.0.15",
     "@harnessio/uicore": "^4.1.2",
+    "@tanstack/react-query": "4.20.4",
     "@types/dompurify": "^3.0.2",
     "@types/react-monaco-editor": "^0.16.0",
     "@uiw/codemirror-extensions-color": "^4.19.9",
@@ -59,12 +64,15 @@
     "classnames": "^2.2.6",
     "clipboard-copy": "^3.1.0",
     "diff2html": "3.4.22",
-    "event-source-polyfill": "^1.0.22",
     "dompurify": "^3.0.5",
+    "event-source-polyfill": "^1.0.22",
     "formik": "2.2.9",
     "hast-util-to-html": "^9.0.1",
+    "highcharts": "9.2.0",
+    "highcharts-react-official": "3.0.0",
     "highlight.js": "^11.8.0",
     "iconoir-react": "^6.11.0",
+    "immer": "^9.0.6",
     "jotai": "^2.6.3",
     "lang-map": "^0.4.0",
     "lodash-es": "^4.17.15",
@@ -99,12 +107,12 @@
     "yup": "^0.29.1"
   },
   "devDependencies": {
-    "@types/event-source-polyfill": "^1.0.0",
     "@svgr/webpack": "^8.1.0",
     "@testing-library/jest-dom": "^5.12.0",
     "@testing-library/react": "^10.0.3",
     "@testing-library/react-hooks": "5",
     "@types/classnames": "^2.2.10",
+    "@types/event-source-polyfill": "^1.0.0",
     "@types/jest": "^26.0.15",
     "@types/lodash-es": "^4.17.3",
     "@types/masonry-layout": "^4.2.1",
@@ -122,6 +130,7 @@
     "@typescript-eslint/eslint-plugin": "^5.33.1",
     "@typescript-eslint/parser": "^5.33.1",
     "case": "^1.6.3",
+    "circular-dependency-plugin": "5.2.2",
     "css-loader": "^6.3.0",
     "dotenv": "^10.0.0",
     "eslint": "^7.27.0",
@@ -136,8 +145,8 @@
     "fork-ts-checker-webpack-plugin": "^6.2.1",
     "glob": "^7.1.6",
     "html-webpack-plugin": "^5.3.1",
+    "husky": "8.0.1",
     "identity-obj-proxy": "^3.0.0",
-    "immer": "^10.0.3",
"^26.2.0", "js-yaml": "^4.1.0", "lodash": "^4.17.21", @@ -158,6 +167,7 @@ "typescript": "^4.7.4", "url-loader": "^4.1.1", "webpack": "^5.58.0", + "webpack-bundle-analyzer": "^4.10.2", "webpack-cli": "^5.1.4", "webpack-dev-server": "^4.15.1", "yaml-loader": "^0.6.0" @@ -167,5 +177,18 @@ }, "engines": { "node": ">=14.16.0" + }, + "i18nSettings": { + "extensionToLanguageMap": { + "es": [ + "es" + ], + "en": [ + "en", + "en-US", + "en-IN", + "en-UK" + ] + } } } diff --git a/web/src/AppProps.ts b/web/src/AppProps.ts index f4d5abfc8..ae21cfa88 100644 --- a/web/src/AppProps.ts +++ b/web/src/AppProps.ts @@ -82,4 +82,10 @@ export interface AppProps { isPublicAccessEnabledOnResources: boolean isCurrentSessionPublic: boolean module?: string + + arAppStore?: { + repositoryIdentifier?: string + artifactIdentifier?: string + versionIdentifier?: string + } } diff --git a/web/src/RouteDefinitions.ts b/web/src/RouteDefinitions.ts index ff00da850..a12f4f30d 100644 --- a/web/src/RouteDefinitions.ts +++ b/web/src/RouteDefinitions.ts @@ -15,6 +15,7 @@ */ import { CDERoutes, routes as cdeRoutes } from 'cde-gitness/RouteDefinitions' +import { ARRoutes, routes as arRoutes } from '@ar/gitness/RouteDefinitions' export interface CODEProps { space?: string @@ -63,7 +64,7 @@ export const pathProps: Readonly, 'repoPath' | 'branch' gitspaceId: ':gitspaceId' } -export interface CODERoutes extends CDERoutes { +export interface CODERoutes extends CDERoutes, ARRoutes { toSignIn: () => string toRegister: () => string @@ -139,7 +140,7 @@ export const routes: CODERoutes = { toCODEUserProfile: () => '/profile', toCODEUserChangePassword: () => '/change-password', - toCODERepositories: ({ space }) => `/spaces/${space}`, + toCODERepositories: ({ space }) => `/spaces/${space}/repos`, toCODERepository: ({ repoPath, gitRef, resourcePath }) => `/${repoPath}${gitRef ? '/files/' + gitRef : ''}${resourcePath ? 
'/~/' + resourcePath : ''}`, toCODEFileEdit: ({ @@ -174,5 +175,6 @@ export const routes: CODERoutes = { toCODEExecutions: ({ repoPath, pipeline }) => `/${repoPath}/pipelines/${pipeline}`, toCODEExecution: ({ repoPath, pipeline, execution }) => `/${repoPath}/pipelines/${pipeline}/execution/${execution}`, toCODESecret: ({ space, secret }) => `/secrets/${space}/secret/${secret}`, - ...cdeRoutes + ...cdeRoutes, + ...arRoutes } diff --git a/web/src/RouteDestinations.tsx b/web/src/RouteDestinations.tsx index a38b3ac0b..f18211e2b 100644 --- a/web/src/RouteDestinations.tsx +++ b/web/src/RouteDestinations.tsx @@ -16,6 +16,7 @@ import React from 'react' import { Route, Switch, BrowserRouter } from 'react-router-dom' +import ArApp from '@ar/gitness/ArApp' import { SignIn } from 'pages/SignIn/SignIn' import { SignUp } from 'pages/SignUp/SignUp' import Repository from 'pages/Repository/Repository' @@ -93,6 +94,14 @@ export const RouteDestinations: React.FC = React.memo(function RouteDestinations + {standalone && ( + + + + + + )} + ) => void + featureFlags: Record +} + +export interface ParentContextObj { + appStoreContext: React.Context + permissionsContext: React.Context> + licenseStoreProvider: React.Context> + tooltipContext?: React.Context> + tokenContext?: React.Context> +} + +export interface Components { + RbacButton: typeof Button + NGBreadcrumbs: typeof NGBreadcrumbs + RbacMenuItem: typeof RbacMenuItem +} + +export interface Hooks { + useDocumentTitle: (title: string | string[]) => { updateTitle: (newTitle: string | string[]) => void } + useLogout: () => { forceLogout: (errorCode?: string) => void } + usePermission: (permissionsRequest?: PermissionsRequest, deps?: Array) => Array +} + +export interface CustomHooks { + useQueryParams: typeof useQueryParams + useUpdateQueryParams: typeof useUpdateQueryParams + useQueryParamsOptions: typeof useQueryParamsOptions + useDefaultPaginationProps: typeof useDefaultPaginationProps + usePreferenceStore: typeof usePreferenceStore + useModalHook: typeof useModalHook + useConfirmationDialog: typeof useConfirmationDialog +} + +export interface CustomComponents { + ModalProvider: typeof ModalProvider + SecretFormInput: typeof SecretFormInput + VulnerabilityView: typeof VulnerabilityView + DependencyView: typeof DependencyView +} + +export interface CustomUtils { + generateToken: () => Promise + getCustomHeaders: () => Record + getApiBaseUrl: (url: string) => string + getRouteDefinitions?: (routeParams: Record) => ARRouteDefinitionsReturn +} + +export interface MFEAppProps { + renderUrl: string + matchPath: string + scope: Scope + customScope: Record + on401: () => void + children?: React.ReactNode + NavComponent?: React.FC + parentContextObj: ParentContextObj + customHooks: CustomHooks + components: Components + customComponents: CustomComponents + customUtils: CustomUtils + hooks: Hooks + parent: Parent + routingId?: string +} diff --git a/web/src/ar/__mocks__/components/DefaultNavComponent.tsx b/web/src/ar/__mocks__/components/DefaultNavComponent.tsx new file mode 100644 index 000000000..6b6b9bf3b --- /dev/null +++ b/web/src/ar/__mocks__/components/DefaultNavComponent.tsx @@ -0,0 +1,24 @@ +/* + * Copyright 2024 Harness, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. 
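`MFEAppProps` above is the contract between a host shell and the AR child app. As a rough sketch of the wiring (all values are hypothetical; `GitnessApp`, added later in this diff, accepts `Partial<MFEAppProps>` and falls back to the `__mocks__` implementations for everything omitted):

```tsx
// Hypothetical standalone host mounting the AR micro-frontend.
// GitnessApp (defined later in this diff) fills unset MFEAppProps from mocks.
import React from 'react'
import { BrowserRouter } from 'react-router-dom'
import GitnessApp from '@ar/app/GitnessApp'

export function StandaloneArHost(): JSX.Element {
  return (
    <BrowserRouter>
      <GitnessApp
        renderUrl="/registries" // base path the child renders under (made up)
        matchPath="/registries" // route pattern the host matched (made up)
        on401={() => window.location.assign('/signin')} // host-owned session expiry handling
      />
    </BrowserRouter>
  )
}
```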
diff --git a/web/src/ar/__mocks__/components/DefaultNavComponent.tsx b/web/src/ar/__mocks__/components/DefaultNavComponent.tsx
new file mode 100644
index 000000000..6b6b9bf3b
--- /dev/null
+++ b/web/src/ar/__mocks__/components/DefaultNavComponent.tsx
@@ -0,0 +1,24 @@
+/*
+ * Copyright 2024 Harness, Inc.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+import React from 'react'
+
+const DefaultNavComponent = (props: { children: React.ReactElement }): JSX.Element => {
+  const { children } = props
+  return <>{children}</>
+}
+
+export default DefaultNavComponent
diff --git a/web/src/ar/__mocks__/components/DependencyView.tsx b/web/src/ar/__mocks__/components/DependencyView.tsx
new file mode 100644
index 000000000..3bcc19cff
--- /dev/null
+++ b/web/src/ar/__mocks__/components/DependencyView.tsx
@@ -0,0 +1,21 @@
+/*
+ * Copyright 2024 Harness, Inc.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+import React from 'react'
+
+export default function DependencyView() {
+  return <>Mock Dependency View</>
+}
diff --git a/web/src/ar/__mocks__/components/NGBreadcrumbs.tsx b/web/src/ar/__mocks__/components/NGBreadcrumbs.tsx
new file mode 100644
index 000000000..5d77896cb
--- /dev/null
+++ b/web/src/ar/__mocks__/components/NGBreadcrumbs.tsx
@@ -0,0 +1,24 @@
+/*
+ * Copyright 2024 Harness, Inc.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+import React from 'react'
+import { defaultTo } from 'lodash-es'
+import type { BreadcrumbsProps } from '@harnessio/uicore'
+import { Breadcrumbs as UiCoreBreadcrumbs } from '@harnessio/uicore'
+
+export default function NGBreadcrumbs(props: Partial<BreadcrumbsProps>): React.ReactElement {
+  return <UiCoreBreadcrumbs {...props} links={defaultTo(props.links, [])} />
+}
diff --git a/web/src/ar/__mocks__/components/RbacMenuItem.tsx b/web/src/ar/__mocks__/components/RbacMenuItem.tsx
new file mode 100644
index 000000000..b4b2dbab3
--- /dev/null
+++ b/web/src/ar/__mocks__/components/RbacMenuItem.tsx
@@ -0,0 +1,31 @@
+/*
+ * Copyright 2024 Harness, Inc.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+import React from 'react'
+import { IMenuItemProps, MaybeElement, MenuItem } from '@blueprintjs/core'
+import { Icon, IconName } from '@harnessio/icons'
+import type { PermissionsRequest } from '@ar/MFEAppTypes'
+import type { PermissionIdentifier } from '@ar/common/permissionTypes'
+
+interface RbacMenuItemProps extends Omit<IMenuItemProps, 'icon'> {
+  icon?: IconName | MaybeElement
+  permission?: Omit<PermissionsRequest, 'permissions'> & { permission: PermissionIdentifier }
+}
+
+export default function RbacMenuItem(props: RbacMenuItemProps) {
+  const { icon, permission, ...rest } = props
+  return <MenuItem {...rest} icon={icon ? <Icon name={icon as IconName} /> : null} />
+}
diff --git a/web/src/ar/__mocks__/components/SecretFormInput.tsx b/web/src/ar/__mocks__/components/SecretFormInput.tsx
new file mode 100644
index 000000000..08eb9b516
--- /dev/null
+++ b/web/src/ar/__mocks__/components/SecretFormInput.tsx
@@ -0,0 +1,45 @@
+/*
+ * Copyright 2024 Harness, Inc.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+import React from 'react'
+import type { FormikProps } from 'formik'
+import { FormInput } from '@harnessio/uicore'
+import type { Scope } from '@ar/MFEAppTypes'
+
+interface SecretFormInputProps {
+  name: string
+  scope: Scope
+  spaceIdFieldName: string
+  label: React.ReactNode
+  disabled?: boolean
+  placeholder?: string
+  formik?: FormikProps<unknown>
+}
+
+export default function SecretFormInput(props: SecretFormInputProps) {
+  const { disabled, name, label, placeholder } = props
+  return (
+    <FormInput.Text name={name} label={label} placeholder={placeholder} disabled={disabled} />
+  )
+}
diff --git a/web/src/ar/__mocks__/components/VulnerabilityView.tsx b/web/src/ar/__mocks__/components/VulnerabilityView.tsx
new file mode 100644
index 000000000..a2f043992
--- /dev/null
+++ b/web/src/ar/__mocks__/components/VulnerabilityView.tsx
@@ -0,0 +1,21 @@
+/*
+ * Copyright 2024 Harness, Inc.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+import React from 'react'
+
+export default function VulnerabilityView() {
+  return <>Mock Vulnerability View</>
+}
diff --git a/web/src/ar/__mocks__/contexts/PreferenceStoreContext.tsx b/web/src/ar/__mocks__/contexts/PreferenceStoreContext.tsx
new file mode 100644
index 000000000..1cfb20f25
--- /dev/null
+++ b/web/src/ar/__mocks__/contexts/PreferenceStoreContext.tsx
@@ -0,0 +1,129 @@
+/*
+ * Copyright 2024 Harness, Inc.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+import React, { useEffect } from 'react'
+import SecureStorage from '@ar/utils/SecureStorage'
+import { useLocalStorage } from '@ar/hooks'
+
+export enum PreferenceScope {
+  USER = 'USER',
+  MACHINE = 'MACHINE' // or workstation. This will act as default PreferenceScope
+}
+
+/**
+ * Preference Store - helps to save ANY user-personalisation info
+ */
+export interface PreferenceStoreProps<T> {
+  set(scope: PreferenceScope, entityToPersist: string, value: T): void
+  get(scope: PreferenceScope, entityToRetrieve: string): T
+  clear(scope: PreferenceScope, entityToRetrieve: string): void
+}
+
+export interface PreferenceStoreContextProps<T> {
+  preference: T
+  setPreference: (value: T) => void
+  clearPreference: () => void
+}
+
+export const PREFERENCES_TOP_LEVEL_KEY = 'preferences'
+
+export const PreferenceStoreContext = React.createContext<PreferenceStoreProps<any>>({
+  set: /* istanbul ignore next */ () => void 0,
+  get: /* istanbul ignore next */ () => void 0,
+  clear: /* istanbul ignore next */ () => void 0
+})
+
+export function usePreferenceStore<T>(scope: PreferenceScope, entity: string): PreferenceStoreContextProps<T> {
+  const { get, set, clear } = React.useContext(PreferenceStoreContext)
+
+  const preference = get(scope, entity)
+  const setPreference = set.bind(null, scope, entity)
+  const clearPreference = clear.bind(null, scope, entity)
+
+  return { preference, setPreference, clearPreference }
+}
+
+const checkAccess = (scope: PreferenceScope, contextArr: (string | undefined)[]): void => {
+  if (!contextArr || contextArr?.some(val => val === undefined)) {
+    const error = new Error(`PreferenceStore: Access to "${scope}" scope is not available in the current context.`)
+    if (__DEV__) {
+      console.error(error) // eslint-disable-line no-console
+    }
+  }
+}
+
+const getKey = (arr: (string | undefined)[], entity: string): string => {
+  return [...arr, entity].join('/')
+}
+
+export const PreferenceStoreProvider: React.FC = (props: React.PropsWithChildren) => {
+  const [currentPreferences, setPreferences] = useLocalStorage<Record<string, unknown>>(PREFERENCES_TOP_LEVEL_KEY, {})
+  const userEmail = SecureStorage.get('email') as string
+  const [scopeToKeyMap, setScopeToKeyMap] = React.useState({
+    [PreferenceScope.USER]: [userEmail],
+    [PreferenceScope.MACHINE]: []
+  })
+
+  useEffect(() => {
+    setScopeToKeyMap({
+      [PreferenceScope.USER]: [userEmail],
+      [PreferenceScope.MACHINE]: []
+    })
+  }, [userEmail])
+
+  const setPreference = (key: string, value: unknown): void => {
+    setPreferences(prevState => {
+      return { ...prevState, [key]: value }
+    })
+  }
+
+  const getPreference = (key: string): any => {
+    return currentPreferences[key]
+  }
+
+  const clearPreference = (key: string): void => {
+    const newPreferences = { ...currentPreferences }
+    delete newPreferences[key]
+    setPreferences(newPreferences)
+  }
+
+  const set = (scope: PreferenceScope, entityToPersist: string, value: unknown): void => {
+    checkAccess(scope, scopeToKeyMap[scope])
+    const key = getKey(scopeToKeyMap[scope], entityToPersist)
+    setPreference(key, value)
+  }
+
+  const get = (scope: PreferenceScope, entityToRetrieve: string): unknown => {
+    const key = getKey(scopeToKeyMap[scope], entityToRetrieve)
+    return getPreference(key)
+  }
+
+  const clear = (scope: PreferenceScope, entityToRetrieve: string): void => {
+    const key = getKey(scopeToKeyMap[scope], entityToRetrieve)
+    clearPreference(key)
+  }
+
+  return (
+    <PreferenceStoreContext.Provider value={{ set, get, clear }}>
+      {props.children}
+    </PreferenceStoreContext.Provider>
+  )
+}
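A consumer reads and writes one entity through `usePreferenceStore`; a minimal sketch (the entity key and component are made up):

```tsx
// Hypothetical consumer of the PreferenceStore mock above.
import React from 'react'
import { PreferenceScope, usePreferenceStore } from '@ar/__mocks__/contexts/PreferenceStoreContext'

function RepoListSort(): JSX.Element {
  // Persisted under "<userEmail>/repoListSort" in the USER scope.
  const { preference, setPreference, clearPreference } = usePreferenceStore<string>(
    PreferenceScope.USER,
    'repoListSort' // made-up entity name
  )
  return (
    <div>
      <span>current: {preference ?? 'none'}</span>
      <button onClick={() => setPreference('name-asc')}>sort by name</button>
      <button onClick={clearPreference}>reset</button>
    </div>
  )
}
```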
diff --git a/web/src/ar/__mocks__/hooks/index.ts b/web/src/ar/__mocks__/hooks/index.ts
new file mode 100644
index 000000000..7c5825f7e
--- /dev/null
+++ b/web/src/ar/__mocks__/hooks/index.ts
@@ -0,0 +1,21 @@
+/*
+ * Copyright 2024 Harness, Inc.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+export { useQueryParams, useQueryParamsOptions, type UseQueryParamsOptions } from './useQueryParams'
+export { useUpdateQueryParams } from './useUpdateQueryParams'
+export { useDefaultPaginationProps } from './useDefaultPaginationProps'
+export { ModalProvider, useModalHook } from './useModalHook'
+export { useConfirmationDialog } from './useConfirmationDialog'
diff --git a/web/src/ar/__mocks__/hooks/useConfirmationDialog.tsx b/web/src/ar/__mocks__/hooks/useConfirmationDialog.tsx
new file mode 100644
index 000000000..8687f57d2
--- /dev/null
+++ b/web/src/ar/__mocks__/hooks/useConfirmationDialog.tsx
@@ -0,0 +1,97 @@
+/*
+ * Copyright 2024 Harness, Inc.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+import React from 'react'
+import { Intent } from '@blueprintjs/core'
+import { ButtonProps, ConfirmationDialog } from '@harnessio/uicore'
+import { useModalHook } from './useModalHook'
+
+export interface UseConfirmationDialogProps {
+  titleText: React.ReactNode
+  contentText: React.ReactNode
+  cancelButtonText?: React.ReactNode
+  intent?: Intent
+  buttonIntent?: ButtonProps['intent']
+  confirmButtonText?: React.ReactNode
+  onCloseDialog?: (isConfirmed: boolean) => void
+  customButtons?: React.ReactNode
+  showCloseButton?: boolean
+  canOutsideClickClose?: boolean
+  canEscapeKeyClose?: boolean
+  children?: JSX.Element
+  className?: string
+  persistDialog?: boolean
+}
+
+export interface UseConfirmationDialogReturn {
+  openDialog: () => void
+  closeDialog: () => void
+}
+
+export const useConfirmationDialog = (props: UseConfirmationDialogProps): UseConfirmationDialogReturn => {
+  const {
+    titleText,
+    contentText,
+    cancelButtonText,
+    intent = Intent.NONE,
+    buttonIntent = Intent.PRIMARY,
+    confirmButtonText,
+    onCloseDialog,
+    customButtons,
+    showCloseButton,
+    canOutsideClickClose,
+    canEscapeKeyClose,
+    children,
+    className,
+    persistDialog
+  } = props
+
+  const [showModal, hideModal] = useModalHook(() => {
+    return (
+      <ConfirmationDialog
+        isOpen
+        titleText={titleText}
+        contentText={contentText}
+        confirmButtonText={confirmButtonText}
+        onClose={onClose}
+        cancelButtonText={cancelButtonText}
+        intent={intent}
+        buttonIntent={buttonIntent}
+        customButtons={customButtons}
+        showCloseButton={showCloseButton}
+        canOutsideClickClose={canOutsideClickClose}
+        canEscapeKeyClose={canEscapeKeyClose}
+        className={className}
+      >
+        {children}
+      </ConfirmationDialog>
+    )
+  }, [props])
+
+  const onClose = React.useCallback(
+    (isConfirmed: boolean): void => {
+      onCloseDialog?.(isConfirmed)
+      hideModal()
+      if (persistDialog) showModal()
+      if (!isConfirmed) hideModal()
+    },
+    [hideModal, onCloseDialog, persistDialog]
+  )
+
+  return {
+    openDialog: () => showModal(),
+    closeDialog: () => hideModal()
+  }
+}
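Callers get back imperative `openDialog`/`closeDialog` handles; a usage sketch (component and handler names are illustrative):

```tsx
// Hypothetical delete confirmation built on the useConfirmationDialog mock above.
import React from 'react'
import { Intent } from '@blueprintjs/core'
import { useConfirmationDialog } from '@ar/__mocks__/hooks'

function DeleteRepositoryButton({ onDelete }: { onDelete: () => void }): JSX.Element {
  const { openDialog } = useConfirmationDialog({
    titleText: 'Delete repository',
    contentText: 'This action cannot be undone.',
    confirmButtonText: 'Delete',
    cancelButtonText: 'Cancel',
    intent: Intent.DANGER,
    onCloseDialog: isConfirmed => {
      // fires on every close; isConfirmed is true only for the confirm button
      if (isConfirmed) onDelete()
    }
  })
  return <button onClick={openDialog}>Delete</button>
}
```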
diff --git a/web/src/ar/__mocks__/hooks/useDefaultPaginationProps.ts b/web/src/ar/__mocks__/hooks/useDefaultPaginationProps.ts
new file mode 100644
index 000000000..8debc3861
--- /dev/null
+++ b/web/src/ar/__mocks__/hooks/useDefaultPaginationProps.ts
@@ -0,0 +1,43 @@
+/*
+ * Copyright 2024 Harness, Inc.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+import { PopoverPosition } from '@blueprintjs/core'
+import type { PaginationProps } from '@harnessio/uicore'
+import { PAGE_SIZE_OPTIONS } from '@ar/constants'
+import { useUpdateQueryParams } from './useUpdateQueryParams'
+
+export type CommonPaginationQueryParams = {
+  page?: number
+  size?: number
+}
+
+export const useDefaultPaginationProps = (props: PaginationProps): PaginationProps => {
+  const { updateQueryParams } = useUpdateQueryParams<CommonPaginationQueryParams>()
+
+  return {
+    gotoPage: page => updateQueryParams({ page }),
+    onPageSizeChange: size => updateQueryParams({ page: 0, size }),
+    showPagination: true,
+    pageSizeDropdownProps: {
+      usePortal: true,
+      popoverProps: {
+        position: PopoverPosition.TOP
+      }
+    },
+    pageSizeOptions: PAGE_SIZE_OPTIONS,
+    ...props
+  }
+}
diff --git a/web/src/ar/__mocks__/hooks/useModalHook.tsx b/web/src/ar/__mocks__/hooks/useModalHook.tsx
new file mode 100644
index 000000000..cd713248c
--- /dev/null
+++ b/web/src/ar/__mocks__/hooks/useModalHook.tsx
@@ -0,0 +1,145 @@
+/*
+ * Copyright 2024 Harness, Inc.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+import React, { useEffect, useState, useCallback, useMemo, memo, useContext } from 'react'
+import ReactDOM from 'react-dom'
+import { noop } from 'lodash-es'
+
+type ModalType = React.FunctionComponent
+
+interface ModalContextType {
+  showModal(key: string, component: ModalType): void
+  hideModal(key: string): void
+}
+
+const ModalContext = React.createContext<ModalContextType>({
+  showModal: noop,
+  hideModal: noop
+})
+
+interface ModalRootProps {
+  modals: Record<string, ModalType>
+  component?: React.ComponentType
+  container?: Element
+}
+
+interface ModalRendererProps {
+  component: ModalType
+}
+
+const ModalRenderer = memo(({ component, ...rest }: ModalRendererProps) => component(rest))
+ModalRenderer.displayName = 'ModalRenderer'
+
+const ModalRoot = memo(({ modals, container, component: RootComponent = React.Fragment }: ModalRootProps) => {
+  const [mountNode, setMountNode] = useState<Element | undefined>(undefined)
+
+  useEffect(() => {
+    setMountNode(container || document.body)
+  }, [container])
+
+  return mountNode
+    ? ReactDOM.createPortal(
+        <RootComponent>
+          {Object.keys(modals).map(key => (
+            <ModalRenderer key={key} component={modals[key]} />
+          ))}
+        </RootComponent>,
+        mountNode
+      )
+    : null
+})
+
+ModalRoot.displayName = 'ModalRoot'
+
+interface ModalProviderProps {
+  container?: Element
+  rootComponent?: React.ComponentType
+  children: React.ReactNode
+}
+
+export const ModalProvider = ({ container, rootComponent, children }: ModalProviderProps) => {
+  if (container && !(container instanceof HTMLElement)) {
+    throw new Error('Container must specify DOM element to mount modal root into.')
+  }
+  const [modals, setModals] = useState<Record<string, ModalType>>({})
+  const showModal = useCallback(
+    (key: string, modal: ModalType) =>
+      setModals(_modals => ({
+        ..._modals,
+        [key]: modal
+      })),
+    []
+  )
+  const hideModal = useCallback(
+    (key: string) =>
+      setModals(_modals => {
+        const newModals = { ..._modals }
+        delete newModals[key]
+        return newModals
+      }),
+    []
+  )
+  const contextValue = useMemo(() => ({ showModal, hideModal }), []) // eslint-disable-line react-hooks/exhaustive-deps
+
+  return (
+    <ModalContext.Provider value={contextValue}>
+      {children}
+      <ModalRoot modals={modals} component={rootComponent} container={container} />
+    </ModalContext.Provider>
+  )
+}
+
+type ShowModal = () => void
+type HideModal = () => void
+
+const generateModalKey = (() => {
+  let count = 0
+  return () => `${++count}`
+})()
+
+const isFunctionalComponent = (Component: React.FunctionComponent) => {
+  const prototype = Component.prototype
+  return !prototype || !prototype.isReactComponent
+}
+
+export const useModalHook = (component: ModalType, inputs: unknown[] = []): [ShowModal, HideModal] => {
+  if (!isFunctionalComponent(component)) {
+    throw new Error(
+      'Only stateless components can be used as an argument to useModal. You have probably passed a class component where a function was expected.'
+    )
+  }
+
+  const key = useMemo(generateModalKey, [])
+  const modal = useMemo(() => component, inputs) // eslint-disable-line react-hooks/exhaustive-deps
+  const context = useContext(ModalContext)
+  const [isShown, setShown] = useState(false)
+  const showModal = useCallback(() => setShown(true), [])
+  const hideModal = useCallback(() => setShown(false), [])
+
+  useEffect(() => {
+    if (isShown) {
+      context.showModal(key, modal)
+    } else {
+      context.hideModal(key)
+    }
+
+    return () => context.hideModal(key)
+  }, [modal, isShown]) // eslint-disable-line react-hooks/exhaustive-deps
+
+  return [showModal, hideModal]
+}
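Because modals registered through this hook render via the shared `ModalRoot` portal, the calling component must sit under a `ModalProvider`. A usage sketch (dialog content is illustrative):

```tsx
// Hypothetical caller of the useModalHook mock above; must render inside ModalProvider.
import React from 'react'
import { Dialog } from '@blueprintjs/core'
import { ModalProvider, useModalHook } from '@ar/__mocks__/hooks'

function UploadButton(): JSX.Element {
  const [showModal, hideModal] = useModalHook(() => (
    <Dialog isOpen onClose={hideModal} title="Upload artifact">
      {/* upload form goes here */}
    </Dialog>
  ))
  return <button onClick={showModal}>Upload</button>
}

export const Example = (): JSX.Element => (
  <ModalProvider>
    <UploadButton />
  </ModalProvider>
)
```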
diff --git a/web/src/ar/__mocks__/hooks/useQueryParams.ts b/web/src/ar/__mocks__/hooks/useQueryParams.ts
new file mode 100644
index 000000000..6acfd32b2
--- /dev/null
+++ b/web/src/ar/__mocks__/hooks/useQueryParams.ts
@@ -0,0 +1,122 @@
+/*
+ * Copyright 2024 Harness, Inc.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+import React, { useEffect, useMemo, useRef } from 'react'
+import { useLocation } from 'react-router-dom'
+import qs from 'qs'
+import type { IParseOptions } from 'qs'
+import { assignWith, get, isNil, set } from 'lodash-es'
+
+export interface UseQueryParamsOptions<T = unknown> extends IParseOptions {
+  processQueryParams?(data: any): T
+}
+
+export function useQueryParams<T>(options?: UseQueryParamsOptions<T>): T {
+  const { search } = useLocation()
+
+  const queryParams = React.useMemo(() => {
+    const params = qs.parse(search, { ignoreQueryPrefix: true, ...options })
+
+    if (typeof options?.processQueryParams === 'function') {
+      return options.processQueryParams(params)
+    }
+
+    return params
+  }, [search, options])
+
+  return queryParams as unknown as T
+}
+
+type CustomQsDecoderOptions = {
+  parseNumbers?: boolean
+  parseBoolean?: boolean
+  ignoreNull?: boolean
+  ignoreEmptyString?: boolean
+}
+
+type CustomQsDecoder = (customQsDecoderOptions?: CustomQsDecoderOptions) => IParseOptions['decoder']
+
+/**
+ * By default, all values are parsed as strings by qs, except for arrays and objects
+ * This is optional decoder that automatically transforms to numbers, booleans and null
+ */
+export const queryParamDecodeAll: CustomQsDecoder =
+  ({ parseNumbers = true, parseBoolean = true, ignoreNull = true, ignoreEmptyString = true } = {}) =>
+  (value, decoder) => {
+    if (parseNumbers && /^(\d+|\d*\.\d+)$/.test(value)) {
+      return parseFloat(value)
+    }
+
+    if (ignoreEmptyString && value.length === 0) {
+      return
+    }
+
+    const keywords: Record<string, null | undefined> = {
+      null: ignoreNull ? undefined : null,
+      undefined: undefined
+    }
+
+    if (value in keywords) {
+      return keywords[value]
+    }
+
+    const booleanKeywords: Record<string, boolean> = {
+      true: true,
+      false: false
+    }
+
+    if (parseBoolean && value in booleanKeywords) {
+      return booleanKeywords[value]
+    }
+
+    return decoder(value)
+  }
+
+// list of params that should be converted back to strings
+// if searchTerm is '123', queryParamDecodeAll converts it to a number, but searchTerm should remain a string
+const ignoreList = ['searchTerm']
+
+// This uses queryParamDecodeAll as the decoder and assigns the value from default params if the processed param's value is null/undefined.
+export const useQueryParamsOptions = <Q extends object, DKey extends keyof Q>(
+  defaultParams: { [K in DKey]: NonNullable<Q[K]> },
+  decoderOptions?: CustomQsDecoderOptions
+): UseQueryParamsOptions<Required<Pick<Q, DKey>>> => {
+  const defaultParamsRef = useRef(defaultParams)
+  useEffect(() => {
+    defaultParamsRef.current = defaultParams
+  }, [defaultParams])
+
+  const options = useMemo(
+    () => ({
+      decoder: queryParamDecodeAll(decoderOptions),
+      processQueryParams: (params: Q) => {
+        const processedParams = { ...params }
+
+        ignoreList.forEach(param => {
+          if (!isNil(get(processedParams, param))) {
+            set(processedParams, param, get(processedParams, param).toString())
+          }
+        })
+
+        return assignWith(processedParams, defaultParamsRef.current, (objValue, srcValue) =>
+          isNil(objValue) ? srcValue : objValue
+        ) as Required<Pick<Q, DKey>>
+      }
+    }),
+    []
+  )
+  return options
+}
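Combined, the two hooks give typed, default-filled query params; a sketch (param names are made up):

```tsx
// Hypothetical list page params using the mocks above: 'page'/'size' come back
// as numbers via queryParamDecodeAll, defaults fill anything missing, and
// 'searchTerm' stays a string because it is on the ignore list.
import { useQueryParams, useQueryParamsOptions } from '@ar/__mocks__/hooks'

interface ListParams {
  page?: number
  size?: number
  searchTerm?: string
}

export function useListParams() {
  const options = useQueryParamsOptions<ListParams, 'page' | 'size'>({ page: 0, size: 50 })
  // '?page=2&searchTerm=123' -> { page: 2, size: 50, searchTerm: '123' }
  return useQueryParams(options)
}
```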
diff --git a/web/src/ar/__mocks__/hooks/useUpdateQueryParams.ts b/web/src/ar/__mocks__/hooks/useUpdateQueryParams.ts
new file mode 100644
index 000000000..e70a76f4a
--- /dev/null
+++ b/web/src/ar/__mocks__/hooks/useUpdateQueryParams.ts
@@ -0,0 +1,67 @@
+/*
+ * Copyright 2024 Harness, Inc.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+import { useCallback, useEffect, useRef } from 'react'
+import qs from 'qs'
+import { isEmpty } from 'lodash-es'
+import type { IStringifyOptions } from 'qs'
+import { useLocation, useHistory } from 'react-router-dom'
+
+import { useQueryParams } from './useQueryParams'
+
+export interface UseUpdateQueryParamsReturn<T> {
+  updateQueryParams(values: T, options?: IStringifyOptions, replaceHistory?: boolean): void
+  replaceQueryParams(values: T, options?: IStringifyOptions, replaceHistory?: boolean): void
+}
+
+export function useUpdateQueryParams<T = Record<string, unknown>>(): UseUpdateQueryParamsReturn<T> {
+  const { pathname } = useLocation()
+  const { push, replace } = useHistory()
+  const queryParams = useQueryParams<T>()
+
+  // queryParams, pathname are stored in refs so that
+  // updateQueryParams/replaceQueryParams can be memoized without changing too often
+  const ref = useRef({ queryParams, pathname })
+  useEffect(() => {
+    ref.current = {
+      queryParams,
+      pathname
+    }
+  }, [queryParams, pathname])
+
+  return {
+    updateQueryParams: useCallback(
+      (values: T, options?: IStringifyOptions, replaceHistory?: boolean): void => {
+        const path = `${ref.current.pathname}?${qs.stringify({ ...ref.current.queryParams, ...values }, options)}`
+        replaceHistory ? replace(path) : push(path)
+      },
+      [push, replace]
+    ),
+    replaceQueryParams: useCallback(
+      (values: T, options?: IStringifyOptions, replaceHistory?: boolean): void => {
+        if (isEmpty(values)) {
+          ref.current = {
+            ...ref.current,
+            queryParams: {} as T
+          }
+        }
+        const path = `${ref.current.pathname}?${qs.stringify(values, options)}`
+        replaceHistory ? replace(path) : push(path)
+      },
+      [push, replace]
+    )
+  }
+}
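And the write side: updates merge into the current query string and push (or replace) a history entry. A short sketch:

```tsx
// Hypothetical pagination handler wired through the useUpdateQueryParams mock above.
import { useUpdateQueryParams } from '@ar/__mocks__/hooks'

export function usePageChange() {
  const { updateQueryParams } = useUpdateQueryParams<{ page?: number; size?: number }>()
  // Pushes e.g. '?page=3&size=50' merged over the existing params;
  // pass replaceHistory=true as the third argument to avoid a new history entry.
  return (page: number) => updateQueryParams({ page })
}
```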
diff --git a/web/src/ar/__mocks__/utils/getApiBaseUrl.tsx b/web/src/ar/__mocks__/utils/getApiBaseUrl.tsx
new file mode 100644
index 000000000..8396c28a9
--- /dev/null
+++ b/web/src/ar/__mocks__/utils/getApiBaseUrl.tsx
@@ -0,0 +1,21 @@
+/*
+ * Copyright 2024 Harness, Inc.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+export function getApiBaseUrl(url: string) {
+  const apiPrefix = '/api/v1'
+  const finalUrl = `${apiPrefix}${url}`
+  return finalUrl
+}
diff --git a/web/src/ar/__mocks__/utils/getCustomHeaders.tsx b/web/src/ar/__mocks__/utils/getCustomHeaders.tsx
new file mode 100644
index 000000000..2235cd523
--- /dev/null
+++ b/web/src/ar/__mocks__/utils/getCustomHeaders.tsx
@@ -0,0 +1,19 @@
+/*
+ * Copyright 2024 Harness, Inc.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+export default function getCustomHeaders() {
+  return {}
+}
diff --git a/web/src/ar/app/App.tsx b/web/src/ar/app/App.tsx
new file mode 100644
index 000000000..dc3d66ed6
--- /dev/null
+++ b/web/src/ar/app/App.tsx
@@ -0,0 +1,125 @@
+/*
+ * Copyright 2024 Harness, Inc.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+import React, { Suspense, useEffect, useRef } from 'react'
+import { Page } from '@harnessio/uicore'
+import { HARServiceAPIClient } from '@harnessio/react-har-service-client'
+import { QueryClientProvider } from '@tanstack/react-query'
+
+import { StringsContextProvider } from '@ar/frameworks/strings/StringsContextProvider'
+import { AppStoreContext } from '@ar/contexts/AppStoreContext'
+import ParentProvider from '@ar/contexts/ParentProvider'
+import type { ParentProviderProps } from '@ar/contexts/ParentProvider'
+import { queryClient } from '@ar/utils/queryClient'
+
+import { Parent } from '@ar/common/types'
+import strings from '@ar/strings/strings.en.yaml'
+import type { MFEAppProps } from '@ar/MFEAppTypes'
+import DefaultNavComponent from '@ar/__mocks__/components/DefaultNavComponent'
+import AppErrorBoundary from '@ar/components/AppErrorBoundary/AppErrorBoundary'
+
+import css from '@ar/app/app.module.scss'
+
+const RouteDestinations = React.lazy(() => import('@ar/routes/RouteDestinations'))
+
+export default function ChildApp(props: MFEAppProps): React.ReactElement {
+  const {
+    renderUrl,
+    parentContextObj,
+    components,
+    scope,
+    customScope,
+    hooks,
+    customHooks,
+    NavComponent = DefaultNavComponent,
+    customComponents,
+    parent,
+    customUtils,
+    matchPath,
+    on401
+  } = props
+
+  const { ModalProvider } = customComponents
+
+  const appStoreData = React.useContext(parentContextObj.appStoreContext)
+  useRef(
+    new HARServiceAPIClient({
+      responseInterceptor: (response: Response): Response => {
+        if (!response.ok && response.status === 401) {
+          on401()
+        }
+
+        return response
+      },
+      urlInterceptor: (url: string) => {
+        return customUtils.getApiBaseUrl(url)
+      },
+      requestInterceptor(request) {
+        request.headers.delete('Authorization')
+        // add custom headers if available
+        const customHeader = customUtils.getCustomHeaders()
+        Object.entries(customHeader).map(([key, value]) => {
+          request.headers.set(key, value)
+        })
+        return request
+      }
+    })
+  )
+
+  useEffect(
+    () => () => {
+      if (typeof appStoreData.updateAppStore === 'function' && parent !== Parent.Enterprise) {
+        appStoreData.updateAppStore({})
+      }
+    },
+    []
+  )
+
+  return (
+
+
+
+
+
+
+
+
+          }>
+
+
+
+
+
+
+
+
+
+  )
+}
diff --git a/web/src/ar/app/EnterpriseApp.tsx b/web/src/ar/app/EnterpriseApp.tsx
new file mode 100644
index 000000000..912f5ffb6
--- /dev/null
+++ b/web/src/ar/app/EnterpriseApp.tsx
@@ -0,0 +1,29 @@
+/*
+ * Copyright 2024 Harness, Inc.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+import React from 'react'
+
+import type { MFEAppProps } from '@ar/MFEAppTypes'
+import App from '@ar/app/App'
+import { Parent } from '@ar/common/types'
+
+import '@ar/styles/uicore.scss'
+
+function EnterpriseApp(props: MFEAppProps) {
+  return <App {...props} parent={Parent.Enterprise} />
+}
+
+export default EnterpriseApp
diff --git a/web/src/ar/app/GitnessApp.tsx b/web/src/ar/app/GitnessApp.tsx
new file mode 100644
index 000000000..f40b31613
--- /dev/null
+++ b/web/src/ar/app/GitnessApp.tsx
@@ -0,0 +1,121 @@
+/*
+ * Copyright 2024 Harness, Inc.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+import React, { createContext } from 'react'
+import { defaultTo, noop } from 'lodash-es'
+import { Button } from '@harnessio/uicore'
+
+import type { MFEAppProps } from '@ar/MFEAppTypes'
+
+import {
+  ModalProvider,
+  useConfirmationDialog,
+  useDefaultPaginationProps,
+  useModalHook,
+  useQueryParams,
+  useQueryParamsOptions,
+  useUpdateQueryParams
+} from '@ar/__mocks__/hooks'
+import RbacMenuItem from '@ar/__mocks__/components/RbacMenuItem'
+import NGBreadcrumbs from '@ar/__mocks__/components/NGBreadcrumbs'
+import DependencyView from '@ar/__mocks__/components/DependencyView'
+import SecretFormInput from '@ar/__mocks__/components/SecretFormInput'
+import VulnerabilityView from '@ar/__mocks__/components/VulnerabilityView'
+import { PreferenceStoreProvider, usePreferenceStore } from '@ar/__mocks__/contexts/PreferenceStoreContext'
+import { Parent } from '@ar/common/types'
+import App from '@ar/app/App'
+
+import '@ar/styles/App.scss'
+import getCustomHeaders from '@ar/__mocks__/utils/getCustomHeaders'
+import { getApiBaseUrl } from '@ar/__mocks__/utils/getApiBaseUrl'
+
+const GitnessApp = (props: Partial<MFEAppProps>): JSX.Element => {
+  const {
+    NavComponent,
+    renderUrl,
+    matchPath,
+    scope,
+    customScope,
+    parentContextObj,
+    components,
+    hooks,
+    customHooks,
+    customComponents,
+    customUtils,
+    parent,
+    on401
+  } = props
+  return (
+    <PreferenceStoreProvider>
+      <App
+
+        hooks={Object.assign(
+          {
+            useDocumentTitle: () => ({ updateTitle: () => void 0 }),
+            useLogout: () => ({ forceLogout: () => void 0 }),
+            usePermission: () => [true]
+          },
+          hooks
+        )}
+        customHooks={Object.assign(
+          {
+            useQueryParams,
+            useUpdateQueryParams,
+            useQueryParamsOptions,
+            useDefaultPaginationProps,
+            usePreferenceStore,
+            useModalHook,
+            useConfirmationDialog
+          },
+          customHooks
+        )}
+        customComponents={Object.assign(
+          {
+            ModalProvider
+          },
+          customComponents
+        )}
+        customUtils={Object.assign(
+          {
+            getCustomHeaders,
+            getApiBaseUrl
+          },
+          customUtils
+        )}
+        on401={defaultTo(on401, noop)}
+      />
+    </PreferenceStoreProvider>
+  )
+}
+
+export default GitnessApp
diff --git a/web/src/ar/app/app.module.scss b/web/src/ar/app/app.module.scss
new file mode 100644
index 000000000..29942f1db
--- /dev/null
+++ b/web/src/ar/app/app.module.scss
@@ -0,0 +1,20 @@
+/*
+ * Copyright 2024 Harness, Inc.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+.pageBody {
+  --page-header-height: 0px;
+  background-color: var(--primary-bg) !important;
+}
diff --git a/web/src/ar/app/app.module.scss.d.ts b/web/src/ar/app/app.module.scss.d.ts
new file mode 100644
index 000000000..ed71a64c0
--- /dev/null
+++ b/web/src/ar/app/app.module.scss.d.ts
@@ -0,0 +1,19 @@
+/*
+ * Copyright 2023 Harness, Inc.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+/* eslint-disable */
+// This is an auto-generated file
+export declare const pageBody: string
diff --git a/web/src/ar/bootstrap.tsx b/web/src/ar/bootstrap.tsx
new file mode 100644
index 000000000..978357430
--- /dev/null
+++ b/web/src/ar/bootstrap.tsx
@@ -0,0 +1,31 @@
+/*
+ * Copyright 2024 Harness, Inc.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+/* eslint-disable @typescript-eslint/no-explicit-any */
+import React from 'react'
+import ReactDOM from 'react-dom'
+import { BrowserRouter } from 'react-router-dom'
+
+import RouteWithSideNav from '@ar/components/RouteWithSideNav/RouteWithSideNav'
+import GitnessApp from '@ar/app/GitnessApp'
+import { Parent } from './common/types'
+
+ReactDOM.render(
+  <BrowserRouter>
+    <GitnessApp NavComponent={RouteWithSideNav} parent={Parent.OSS} />
+  </BrowserRouter>,
+  document.getElementById('react-root')
+)
diff --git a/web/src/ar/common/constants.ts b/web/src/ar/common/constants.ts
new file mode 100644
index 000000000..8f107d5e8
--- /dev/null
+++ b/web/src/ar/common/constants.ts
@@ -0,0 +1,82 @@
+/*
+ * Copyright 2024 Harness, Inc.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+import type { IconName } from '@harnessio/icons'
+import type { StringsMap } from '@ar/frameworks/strings'
+import { RepositoryPackageType } from './types'
+
+export interface RepositoryTypeListItem {
+  label: keyof StringsMap
+  value: RepositoryPackageType
+  icon: IconName
+  disabled?: boolean
+  tooltip?: string
+}
+
+export const RepositoryTypes: RepositoryTypeListItem[] = [
+  {
+    label: 'repositoryTypes.docker',
+    value: RepositoryPackageType.DOCKER,
+    icon: 'docker-step'
+  },
+  {
+    label: 'repositoryTypes.helm',
+    value: RepositoryPackageType.HELM,
+    icon: 'service-helm'
+  },
+  {
+    label: 'repositoryTypes.generic',
+    value: RepositoryPackageType.GENERIC,
+    icon: 'generic-repository-type',
+    tooltip: 'Coming Soon!',
+    disabled: true
+  },
+  {
+    label: 'repositoryTypes.maven',
+    value: RepositoryPackageType.MAVEN,
+    icon: 'maven-repository-type',
+    tooltip: 'Coming Soon!',
+    disabled: true
+  },
+  {
+    label: 'repositoryTypes.npm',
+    value: RepositoryPackageType.NPM,
+    icon: 'npm-repository-type',
+    tooltip: 'Coming Soon!',
+    disabled: true
+  },
+  {
+    label: 'repositoryTypes.gradle',
+    value: RepositoryPackageType.GRADLE,
+    icon: 'gradle-repository-type',
+    tooltip: 'Coming Soon!',
+    disabled: true
+  },
+  {
+    label: 'repositoryTypes.pypi',
+    value: RepositoryPackageType.PYPI,
+    icon: 'python',
+    tooltip: 'Coming Soon!',
+    disabled: true
+  },
+  {
+    label: 'repositoryTypes.nuget',
+    value: RepositoryPackageType.NUGET,
+    icon: 'nuget-repository-type',
+    tooltip: 'Coming Soon!',
+    disabled: true
+  }
+]
diff --git a/web/src/ar/common/dateUtils.ts b/web/src/ar/common/dateUtils.ts
new file mode 100644
index 000000000..d5c6beb07
--- /dev/null
+++ b/web/src/ar/common/dateUtils.ts
@@ -0,0 +1,24 @@
+/*
+ * Copyright 2024 Harness, Inc.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+import moment from 'moment'
+
+export const getReadableDateTime = (timestamp?: number, formatString = 'MMM DD, YYYY'): string => {
+  if (!timestamp) {
+    return ''
+  }
+  return moment(timestamp).format(formatString)
+}
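The format string is moment syntax, so for example:

```ts
// Expected behaviour of getReadableDateTime above (illustrative values).
import { getReadableDateTime } from '@ar/common/dateUtils'

getReadableDateTime(1717577400000)                     // 'Jun 05, 2024' (default format)
getReadableDateTime(1717577400000, 'YYYY-MM-DD HH:mm') // '2024-06-05 09:30' (timezone-dependent)
getReadableDateTime(undefined)                         // '' (missing timestamps render empty)
```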
diff --git a/web/src/ar/common/permissionTypes.ts b/web/src/ar/common/permissionTypes.ts
new file mode 100644
index 000000000..6e3f193f3
--- /dev/null
+++ b/web/src/ar/common/permissionTypes.ts
@@ -0,0 +1,24 @@
+/*
+ * Copyright 2024 Harness, Inc.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+export enum PermissionIdentifier {
+  DELETE_SERVICE = 'core_service_delete',
+  EDIT_SERVICE = 'core_service_edit'
+}
+
+export enum ResourceType {
+  SERVICE = 'SERVICE'
+}
diff --git a/web/src/ar/common/types.ts b/web/src/ar/common/types.ts
new file mode 100644
index 000000000..a757d6e37
--- /dev/null
+++ b/web/src/ar/common/types.ts
@@ -0,0 +1,55 @@
+/*
+ * Copyright 2024 Harness, Inc.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+import type { FormikProps } from 'formik'
+
+export enum Parent {
+  OSS = 'OSS',
+  Enterprise = 'Enterprise'
+}
+
+export type FormikRef<T = unknown> = Pick<FormikProps<T>, 'submitForm' | 'errors'>
+
+export type FormikFowardRef<T = unknown> =
+  | ((instance: FormikRef<T> | null) => void)
+  | React.MutableRefObject<FormikRef<T> | null>
+  | null
+
+export enum EnvironmentType {
+  Production = 'Production',
+  PreProduction = 'PreProduction'
+}
+
+export enum RepositoryPackageType {
+  DOCKER = 'DOCKER',
+  HELM = 'HELM',
+  GENERIC = 'GENERIC',
+  MAVEN = 'MAVEN',
+  NPM = 'NPM',
+  GRADLE = 'GRADLE',
+  PYPI = 'PYPI',
+  NUGET = 'NUGET'
+}
+
+export enum RepositoryConfigType {
+  VIRTUAL = 'VIRTUAL',
+  UPSTREAM = 'UPSTREAM'
+}
+
+export enum PageType {
+  Details = 'Details',
+  Table = 'Table'
+}
diff --git a/web/src/ar/common/utils.ts b/web/src/ar/common/utils.ts
new file mode 100644
index 000000000..e2f6ebdc6
--- /dev/null
+++ b/web/src/ar/common/utils.ts
@@ -0,0 +1,43 @@
+/*
+ * Copyright 2024 Harness, Inc.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+import type { SyntheticEvent } from 'react'
+import type { FormikProps } from 'formik'
+import { isEmpty } from 'lodash-es'
+import type { FormikFowardRef, RepositoryPackageType } from './types'
+
+export function setFormikRef<T = unknown>(ref: FormikFowardRef<T>, formik: FormikProps<T>): void {
+  if (!ref) return
+
+  if (typeof ref === 'function') {
+    return
+  }
+
+  ref.current = formik as unknown as FormikProps<T>
+}
+
+export function getIdentifierStringForBreadcrumb(label: string, value: string): string {
+  return `${label}: ${value}`
+}
+
+export function killEvent(e: React.MouseEvent | SyntheticEvent | undefined): void {
+  // do not add preventDefault here, that works odd with checkbox selection
+  e?.stopPropagation()
+}
+
+export function getPackageTypesForApiQueryParams(packageTypes: RepositoryPackageType[]): string | undefined {
+  return isEmpty(packageTypes) ? undefined : packageTypes.join(',')
+}
diff --git a/web/src/ar/components/AddPatternList/AddPatternList.module.scss b/web/src/ar/components/AddPatternList/AddPatternList.module.scss
new file mode 100644
index 000000000..28fe3193e
--- /dev/null
+++ b/web/src/ar/components/AddPatternList/AddPatternList.module.scss
@@ -0,0 +1,33 @@
+/*
+ * Copyright 2024 Harness, Inc.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+.patternsContainer {
+  align-items: flex-start;
+  .patternContainer {
+    .patternInput {
+      width: 324px;
+      margin-bottom: 0px !important;
+    }
+  }
+  .patternsListContainer {
+    width: 100% !important;
+    margin-top: var(--spacing-small) !important;
+  }
+  .addPatternBtn {
+    padding: 0px !important;
+    font-size: 12px !important;
+  }
+}
diff --git a/web/src/ar/components/AddPatternList/AddPatternList.module.scss.d.ts b/web/src/ar/components/AddPatternList/AddPatternList.module.scss.d.ts
new file mode 100644
index 000000000..368ba9ff9
--- /dev/null
+++ b/web/src/ar/components/AddPatternList/AddPatternList.module.scss.d.ts
@@ -0,0 +1,23 @@
+/*
+ * Copyright 2023 Harness, Inc.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+/* eslint-disable */
+// This is an auto-generated file
+export declare const addPatternBtn: string
+export declare const patternContainer: string
+export declare const patternInput: string
+export declare const patternsContainer: string
+export declare const patternsListContainer: string
diff --git a/web/src/ar/components/AddPatternList/AddPatternList.tsx b/web/src/ar/components/AddPatternList/AddPatternList.tsx
new file mode 100644
index 000000000..6c2d0fcc6
--- /dev/null
+++ b/web/src/ar/components/AddPatternList/AddPatternList.tsx
@@ -0,0 +1,86 @@
+/*
+ * Copyright 2024 Harness, Inc.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
diff --git a/web/src/ar/components/AddPatternList/AddPatternList.tsx b/web/src/ar/components/AddPatternList/AddPatternList.tsx
new file mode 100644
index 000000000..6c2d0fcc6
--- /dev/null
+++ b/web/src/ar/components/AddPatternList/AddPatternList.tsx
@@ -0,0 +1,86 @@
+/*
+ * Copyright 2024 Harness, Inc.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+import React from 'react'
+import { get } from 'lodash-es'
+import classNames from 'classnames'
+import type { FormikProps } from 'formik'
+import { FontVariation } from '@harnessio/design-system'
+import { Button, FormInput, Layout, Text } from '@harnessio/uicore'
+
+import css from './AddPatternList.module.scss'
+
+interface AddPatternListProps {
+  name: string
+  formikProps: FormikProps<any>
+  onAdd: (val: string) => void
+  onRemove: (idx: number) => void
+  label: string
+  placeholder: string
+  addButtonLabel: string
+  disabled: boolean
+  className?: string
+}
+
+export default function AddPatternList({
+  name,
+  formikProps,
+  onRemove,
+  onAdd,
+  label,
+  placeholder,
+  addButtonLabel,
+  disabled,
+  className
+}: AddPatternListProps): JSX.Element {
+  const value = get(formikProps.values, name)
+  return (
+
+      {label}
+
+      {value?.map((_each: string, index: number) => (
+
+
+  )
+}
+
+interface RepositoryLocationBadgeProps {
+  value: RepositoryConfigType
+}
+
+export const RepositoryLocationBadgeCell = ({ value }: RepositoryLocationBadgeProps): JSX.Element => {
+  return
+}
+
+export const SizeCell = ({ value }: CommonCellProps): JSX.Element => {
+  const { getString } = useStrings()
+  return {defaultTo(value, getString('na'))}
+}
+
+export const CountCell = ({ value, icon, iconProps }: CountCellProps): JSX.Element => {
+  const _iconProps = defaultTo(iconProps, DefaultIconProps)
+  return (
+
+      {defaultTo(value, 0)}
+
+  )
+}
+
+export const TextCell = ({ value }: CommonCellProps): JSX.Element => {
+  const { getString } = useStrings()
+  return (
+
+      {defaultTo(value, getString('na'))}
+
+  )
+}
+
+export interface ToggleAccordionCellProps {
+  expandedRows: Set<string>
+  setExpandedRows: React.Dispatch<React.SetStateAction<Set<string>>>
+  value: string
+  initialIsExpanded: boolean
+  getToggleRowExpandedProps: (props?: Partial<TableExpandedToggleProps>) => TableExpandedToggleProps
+  onToggleRowExpanded: (val: boolean) => void
+}
+
+const ToggleAccordionCell = (props: ToggleAccordionCellProps): JSX.Element => {
+  const { expandedRows, setExpandedRows, value } = props
+  const [isExpanded, setIsExpanded] = React.useState(props.initialIsExpanded)
+
+  React.useEffect(() => {
+    if (value) {
+      const isRowExpanded = expandedRows.has(value)
+      setIsExpanded(isRowExpanded)
+      props.onToggleRowExpanded(isRowExpanded)
+    }
+  }, [value, expandedRows, props.onToggleRowExpanded])
+
+  const toggleRow = (evt: React.MouseEvent): void => {
+    killEvent(evt)
+    setExpandedRows(handleToggleExpandableRow(value))
+  }
+
+  return (
+
+
+
+      {token && (
+        <>
+
+
+
+      )}
+    )
+}
diff --git a/web/src/ar/pages/repository-details/components/SetupClientContent/SetupClientContent.module.scss b/web/src/ar/pages/repository-details/components/SetupClientContent/SetupClientContent.module.scss
new file mode 100644
index 000000000..b3491ca92
--- /dev/null
+++ b/web/src/ar/pages/repository-details/components/SetupClientContent/SetupClientContent.module.scss
@@ -0,0 +1,53 @@
+/*
+ * Copyright 2024 Harness, Inc.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+.pageBody {
+  --page-header-height: 0px;
+}
+
+.stepGridContainer {
+  display: grid;
+  grid-template-columns: max-content auto;
+  gap: var(--spacing-medium);
+  align-items: center;
+
+  .label {
+    text-transform: uppercase;
+    color: var(--primary-7) !important;
+  }
+
+  .generateTokenBtn {
+    padding: 0px !important;
+  }
+}
+
+.titleContainer {
+  position: sticky;
+  top: 0px;
+  background-color: var(--grey-0) !important;
+  padding-left: var(--spacing-xxlarge) !important;
+  padding-right: var(--spacing-xxlarge) !important;
+  padding-top: var(--spacing-xxlarge) !important;
+  padding-bottom: var(--spacing-medium) !important;
+  border-bottom: 1px solid var(--grey-200) !important;
+}
+
+.contentContainer {
+  padding-left: var(--spacing-xxlarge) !important;
+  padding-right: var(--spacing-xxlarge) !important;
+  padding-top: 0px !important;
+  padding-bottom: var(--spacing-xxlarge) !important;
+}
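`ToggleAccordionCell` above calls a `handleToggleExpandableRow` helper that does not appear anywhere in this diff. One plausible implementation, offered purely as an assumption about its contract (a factory returning a functional state updater that toggles membership of the row key in the expanded set):

```ts
// Assumed shape of the helper referenced by ToggleAccordionCell; not in this diff.
// Returning a functional updater means concurrent toggles always work from the
// latest Set, and returning a new Set keeps React state immutable.
export function handleToggleExpandableRow(value: string): (prev: Set<string>) => Set<string> {
  return (prev: Set<string>): Set<string> => {
    const next = new Set(prev)
    if (next.has(value)) {
      next.delete(value)
    } else {
      next.add(value)
    }
    return next
  }
}
```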
diff --git a/web/src/ar/pages/repository-details/components/SetupClientContent/SetupClientContent.module.scss.d.ts b/web/src/ar/pages/repository-details/components/SetupClientContent/SetupClientContent.module.scss.d.ts
new file mode 100644
index 000000000..9f935a44e
--- /dev/null
+++ b/web/src/ar/pages/repository-details/components/SetupClientContent/SetupClientContent.module.scss.d.ts
@@ -0,0 +1,24 @@
+/*
+ * Copyright 2023 Harness, Inc.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+/* eslint-disable */
+// This is an auto-generated file
+export declare const contentContainer: string
+export declare const generateTokenBtn: string
+export declare const label: string
+export declare const pageBody: string
+export declare const stepGridContainer: string
+export declare const titleContainer: string
diff --git a/web/src/ar/pages/repository-details/components/SetupClientContent/SetupClientContent.tsx b/web/src/ar/pages/repository-details/components/SetupClientContent/SetupClientContent.tsx
new file mode 100644
index 000000000..04e960dec
--- /dev/null
+++ b/web/src/ar/pages/repository-details/components/SetupClientContent/SetupClientContent.tsx
@@ -0,0 +1,115 @@
+/*
+ * Copyright 2024 Harness, Inc.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+import React from 'react'
+import { FontVariation } from '@harnessio/design-system'
+import { Button, ButtonVariation, Container, Layout, Page, Text } from '@harnessio/uicore'
+import { PackageType, useGetClientSetupDetailsQuery } from '@harnessio/react-har-service-client'
+
+import { useGetSpaceRef } from '@ar/hooks'
+import { useStrings } from '@ar/frameworks/strings'
+import type { RepositoryPackageType } from '@ar/common/types'
+import RepositoryIcon from '@ar/frameworks/RepositoryStep/RepositoryIcon'
+import CommandBlock from '@ar/components/CommandBlock/CommandBlock'
+
+import GenerateTokenStep from './GenerateTokenStep'
+import { ClientSetupStepTypeEnum } from './types'
+
+import css from './SetupClientContent.module.scss'
+
+interface SetupClientContentProps {
+  onClose: () => void
+  repoKey: string
+  artifactKey?: string
+  versionKey?: string
+  packageType: PackageType
+}
+
+const combineCommands = (list: string[]): string => {
+  return list.join('\n')
+}
+
+export default function SetupClientContent(props: SetupClientContentProps): JSX.Element {
+  const { onClose, packageType, repoKey } = props
+  const { getString } = useStrings()
+  const spaceRef = useGetSpaceRef(repoKey)
+
+  const {
+    isFetching: loading,
+    data,
+    error,
+    refetch
+  } = useGetClientSetupDetailsQuery({
+    registry_ref: spaceRef,
+    queryParams: {
+      artifact: props.artifactKey,
+      version: props.versionKey
+    }
+  })
+
+  const responseData = data?.content.data
+
+  return (
+      refetch()}>
+      {responseData && (
+
+
+
+            {responseData.mainHeader}
+
+
+          {responseData.secHeader}
+          {responseData.sections.map((section, index) => (
+
+              {section.header}
+              {section.steps?.map((step, stepIndex) => {
+                if (step.type === ClientSetupStepTypeEnum.GenerateToken) {
+                  return
+                }
+                return (
+
+
+                      {getString('repositoryDetails.clientSetup.step', { stepIndex: stepIndex + 1 })}
+
+                    {step.header}
+                    {step.commands && (
+                      <>
+
+
+
+                    )}
+                  )
+                })}
+
+          ))}
+
+
+
+  }
+    breadcrumbs={}
+  />
+
+
+
+      {
+        updateQueryParams({ repositoryTypes: val, page: DEFAULT_PAGE_INDEX })
+      }}
+    />
+
+      {
+        updateQueryParams({ searchTerm: text || undefined, page: DEFAULT_PAGE_INDEX })
+      }}
+      defaultValue={searchTerm}
+      ref={searchRef}
+    />
+
+      refetch()}
+      noData={{
+        when: () => !responseData?.registries?.length, // TODO: change to itemCount once BE fixes the issue with paginated response
+        icon: 'thinner-code-repos',
+        // image: getEmptyStateIllustration(hasFilter, module),
+        messageTitle: hasFilter ? getString('noResultsFound') : getString('repositoryList.table.noRepositoriesTitle'),
+        button: hasFilter ? (