first commit
This commit is contained in:
commit
fdc3829040
5 changed files with 534 additions and 0 deletions
48
README.md
Normal file
48
README.md
Normal file
|
@ -0,0 +1,48 @@
|
||||||
|
# delete-docker-registry-image
|
||||||
|
|
||||||
|
## Install
|
||||||
|
|
||||||
|
curl https://raw.githubusercontent.com/burnettk/delete-docker-registry-image/master/delete_docker_registry_image.py | sudo tee /usr/local/bin/delete_docker_registry_image >/dev/null
|
||||||
|
sudo chmod a+x /usr/local/bin/delete_docker_registry_image
|
||||||
|
|
||||||
|
## Run
|
||||||
|
|
||||||
|
Set up your data directory via an environment variable:
|
||||||
|
|
||||||
|
export REGISTRY_DATA_DIR=/opt/registry_data/docker/registry/v2
|
||||||
|
|
||||||
|
You can also just edit the script where this variable is set to make it work
|
||||||
|
for your setup.
|
||||||
|
|
||||||
|
Almost delete a repo:
|
||||||
|
|
||||||
|
delete_docker_registry_image --image testrepo/awesomeimage --dry-run
|
||||||
|
|
||||||
|
Actually delete a repo (remember to shut down your registry first):
|
||||||
|
|
||||||
|
delete_docker_registry_image --image testrepo/awesomeimage
|
||||||
|
|
||||||
|
Delete one tag from a repo:
|
||||||
|
|
||||||
|
delete_docker_registry_image --image testrepo/awesomeimage:supertag
|
||||||
|
|
||||||
|
|
||||||
|
## clean_old_versions.py
|
||||||
|
|
||||||
|
This complementary script is made to remove tags in a repository based on a
|
||||||
|
regexp pattern.
|
||||||
|
|
||||||
|
Usage:
|
||||||
|
|
||||||
|
./clean_old_versions.py --image reg_exp_of_repository_to_find --include reg_exp_of_tag_to_find -l history_to_maintain --registry-url location_of_docker_registry -o tag_ordering -b only_tags_before_date -a only_tags_after_date
|
||||||
|
|
||||||
|
Example:
|
||||||
|
Search for all images whose name start with 'repo/sitor' and delete all tags
|
||||||
|
whose name start with '0.1.' keeping the last 2 tags and of the remaining tags
|
||||||
|
deletes only those having an image creation time between January 1, 2016 12 a.m.
|
||||||
|
and June 25, 2016 12 p.m. (both datetimes are exclusive).
|
||||||
|
|
||||||
|
./clean_old_versions.py --image '^repo/sitor*' --include '^0.1.*' -l 2 -b 2016-06-25T12:00:00 -a 2016-01-01T00:00:00 --registry-url http://localhost:5000
|
||||||
|
|
||||||
|
Add `--dry-run` as argument for a test run without actual removal of tags.
|
||||||
|
|
20
config.yml
Normal file
20
config.yml
Normal file
|
@ -0,0 +1,20 @@
|
||||||
|
version: 0.1
|
||||||
|
log:
|
||||||
|
fields:
|
||||||
|
service: registry
|
||||||
|
storage:
|
||||||
|
cache:
|
||||||
|
blobdescriptor: inmemory
|
||||||
|
filesystem:
|
||||||
|
rootdirectory: /var/lib/registry
|
||||||
|
http:
|
||||||
|
addr: :5000
|
||||||
|
headers:
|
||||||
|
X-Content-Type-Options: [nosniff]
|
||||||
|
Access-Control-Allow-Origin: ['*']
|
||||||
|
health:
|
||||||
|
storagedriver:
|
||||||
|
enabled: true
|
||||||
|
interval: 10s
|
||||||
|
threshold: 3
|
||||||
|
|
7
del_images.sh
Executable file
7
del_images.sh
Executable file
|
@ -0,0 +1,7 @@
|
||||||
|
#!/bin/bash
# Stop the registry container, delete the given image from the registry's
# on-disk data, then start the container again.
#
# Usage: ./del_images.sh <repo/image[:tag]>

# Fail early with a usage message instead of running the pipeline with an
# empty --image argument.
if [ -z "$1" ]; then
    echo "usage: $0 <repo/image[:tag]>" >&2
    exit 1
fi

# "$1" is expanded by the outer shell before the heredoc is fed to the
# inner root shell; the quotes protect the expanded value from word
# splitting inside that inner shell.
sudo bash <<EOF
docker stop registry-server
export REGISTRY_DATA_DIR=/home/nvme/dockerdata/registry/docker/registry/v2
delete_docker_registry_image --image "$1"
docker start registry-server
EOF
|
421
delete_docker_registry_image
Executable file
421
delete_docker_registry_image
Executable file
|
@ -0,0 +1,421 @@
|
||||||
|
#!/usr/bin/env python
|
||||||
|
"""
|
||||||
|
Usage:
|
||||||
|
Shut down your registry service to avoid race conditions and possible data loss
|
||||||
|
and then run the command with an image repo like this:
|
||||||
|
delete_docker_registry_image.py --image awesomeimage --dry-run
|
||||||
|
"""
|
||||||
|
|
||||||
|
#registry_data_dir = "/home/nvme/dockerdata/registry/docker/registry/v2"
|
||||||
|
|
||||||
|
import argparse
|
||||||
|
import json
|
||||||
|
import logging
|
||||||
|
import os
|
||||||
|
import sys
|
||||||
|
import shutil
|
||||||
|
import glob
|
||||||
|
|
||||||
|
logger = logging.getLogger(__name__)
|
||||||
|
|
||||||
|
|
||||||
|
def del_empty_dirs(s_dir, top_level):
    """Recursively remove empty directories beneath *s_dir*.

    A directory counts as empty when it contains nothing but (recursively)
    empty directories.  The top-level directory itself is never removed,
    only reported.  Returns True when *s_dir* was empty.
    """
    is_empty = True
    for entry in os.listdir(s_dir):
        child = os.path.join(s_dir, entry)
        if not os.path.isdir(child):
            # any regular file keeps the whole subtree alive
            is_empty = False
        elif not del_empty_dirs(child, False):
            is_empty = False

    if is_empty:
        logger.debug("Deleting empty directory '%s'", s_dir)
        if not top_level:
            os.rmdir(s_dir)

    return is_empty
|
||||||
|
|
||||||
|
|
||||||
|
def get_layers_from_blob(path):
    """Parse a manifest JSON blob at *path* and return its layer digests.

    Handles both schema v1 (``fsLayers``/``blobSum``) and schema v2
    (``layers``/``digest`` plus the image ``config`` digest).  The
    ``sha256:`` algorithm prefix is stripped from every digest.  Any
    failure is logged and yields an empty set.
    """
    try:
        with open(path, "r") as blob:
            data = json.load(blob)
        if data["schemaVersion"] == 1:
            digests = {entry["blobSum"].split(":")[1] for entry in data["fsLayers"]}
        else:
            digests = {entry["digest"].split(":")[1] for entry in data["layers"]}
            # v2 manifests also reference the image config as a blob
            if "config" in data:
                digests.add(data["config"]["digest"].split(":")[1])
        return digests
    except Exception as error:
        logger.critical("Failed to read layers from blob:%s", error)
        return set()
|
||||||
|
|
||||||
|
|
||||||
|
def get_digest_from_blob(path):
    """Read a registry link file and return the digest's hex portion.

    Link files contain e.g. ``sha256:<hex>``; only the part after the
    first colon is returned.  Any read or parse failure is logged and
    an empty string is returned instead.
    """
    try:
        # the split stays inside the try so a malformed file (no colon)
        # is also caught and reported rather than raising IndexError
        with open(path, "r") as link_file:
            return link_file.read().split(":")[1]
    except Exception as error:
        logger.critical("Failed to read digest from blob:%s", error)
        return ""
|
||||||
|
|
||||||
|
|
||||||
|
def get_links(path, _filter=None):
    """Walk *path* recursively and collect the digest from every ``link`` file.

    When *_filter* is a non-empty string, only link files whose full path
    contains it are considered.  Returns digests in walk order.
    """
    digests = []
    for dirpath, _, filenames in os.walk(path):
        for name in filenames:
            if name != "link":
                continue
            full_path = os.path.join(dirpath, name)
            if not _filter or _filter in full_path:
                digests.append(get_digest_from_blob(full_path))
    return digests
|
||||||
|
|
||||||
|
|
||||||
|
class RegistryCleanerError(Exception):
    """Raised by RegistryCleaner for unrecoverable cleanup errors,
    e.g. a missing registry data directory, repository, or tag."""
    pass
|
||||||
|
|
||||||
|
|
||||||
|
class RegistryCleaner(object):
    """Delete image data (manifests, layer links, blobs) directly from a
    docker registry's filesystem storage.

    The registry service should be stopped while this runs, since files
    are manipulated behind the registry's back.  With ``dry_run`` set,
    every deletion is only logged.
    """

    def __init__(self, registry_data_dir, dry_run=False):
        """Record the data directory and validate that it exists.

        :param registry_data_dir: path to the registry's ``v2`` data dir
        :param dry_run: when True, log deletions instead of performing them
        :raises RegistryCleanerError: if the directory does not exist
        """
        self.registry_data_dir = registry_data_dir
        if not os.path.isdir(self.registry_data_dir):
            raise RegistryCleanerError("No repositories directory found inside "
                                       "REGISTRY_DATA_DIR '{0}'.".
                                       format(self.registry_data_dir))
        self.dry_run = dry_run

    def _delete_layer(self, repo, digest):
        """remove a repository's layer link directory from the filesystem"""
        path = os.path.join(self.registry_data_dir, "repositories", repo, "_layers/sha256", digest)
        self._delete_dir(path)

    def _delete_blob(self, digest):
        """remove a blob directory (content-addressed by digest) from the filesystem"""
        # blobs are sharded by the first two hex chars of the digest
        path = os.path.join(self.registry_data_dir, "blobs/sha256", digest[0:2], digest)
        self._delete_dir(path)

    def _blob_path_for_revision(self, digest):
        """where we can find the blob that contains the json describing this digest"""
        return os.path.join(self.registry_data_dir, "blobs/sha256",
                            digest[0:2], digest, "data")

    def _blob_path_for_revision_is_missing(self, digest):
        """for each revision, there should be a blob describing it"""
        return not os.path.isfile(self._blob_path_for_revision(digest))

    def _get_layers_from_blob(self, digest):
        """get layer digests referenced by the manifest blob for *digest*"""
        return get_layers_from_blob(self._blob_path_for_revision(digest))

    def _delete_dir(self, path):
        """remove a directory tree, honoring dry_run; failures are logged, not raised"""
        if self.dry_run:
            logger.info("DRY_RUN: would have deleted %s", path)
        else:
            logger.info("Deleting %s", path)
            try:
                shutil.rmtree(path)
            except Exception as error:
                logger.critical("Failed to delete directory:%s", error)

    def _delete_from_tag_index_for_revision(self, repo, digest):
        """delete revision *digest* from every tag index of *repo*"""
        paths = glob.glob(
            os.path.join(self.registry_data_dir, "repositories", repo,
                         "_manifests/tags/*/index/sha256", digest)
        )
        for path in paths:
            self._delete_dir(path)

    def _delete_revisions(self, repo, revisions, blobs_to_keep=None):
        """delete each revision directory plus its blobs (except blobs_to_keep)"""
        if blobs_to_keep is None:
            blobs_to_keep = []
        for revision_dir in revisions:
            digests = get_links(revision_dir)
            for digest in digests:
                self._delete_from_tag_index_for_revision(repo, digest)
                if digest not in blobs_to_keep:
                    self._delete_blob(digest)

            self._delete_dir(revision_dir)

    def _get_tags(self, repo):
        """return all tag names for *repo*, or None if the repo is missing"""
        path = os.path.join(self.registry_data_dir, "repositories", repo, "_manifests/tags")
        if not os.path.isdir(path):
            logger.critical("No repository '%s' found in repositories directory %s",
                            repo, self.registry_data_dir)
            return None
        result = []
        for each in os.listdir(path):
            filepath = os.path.join(path, each)
            if os.path.isdir(filepath):
                result.append(each)
        return result

    def _get_repositories(self):
        """list all repository names, descending one level for namespaced repos"""
        result = []
        root = os.path.join(self.registry_data_dir, "repositories")
        for each in os.listdir(root):
            filepath = os.path.join(root, each)
            if os.path.isdir(filepath):
                inside = os.listdir(filepath)
                if "_layers" in inside:
                    # plain repo, e.g. "awesomeimage"
                    result.append(each)
                else:
                    # namespaced repo, e.g. "testrepo/awesomeimage"
                    for inner in inside:
                        result.append(os.path.join(each, inner))
        return result

    def _get_all_links(self, except_repo=""):
        """collect link digests from every repository except *except_repo*"""
        result = []
        repositories = self._get_repositories()
        for repo in [r for r in repositories if r != except_repo]:
            path = os.path.join(self.registry_data_dir, "repositories", repo)
            for link in get_links(path):
                result.append(link)
        return result

    def prune(self):
        """delete all empty directories in registry_data_dir"""
        del_empty_dirs(self.registry_data_dir, True)

    def _layer_in_same_repo(self, repo, tag, layer):
        """check if layer is found in other tags of same repository"""
        for other_tag in [t for t in self._get_tags(repo) if t != tag]:
            path = os.path.join(self.registry_data_dir, "repositories", repo,
                                "_manifests/tags", other_tag, "current/link")
            manifest = get_digest_from_blob(path)
            try:
                layers = self._get_layers_from_blob(manifest)
                if layer in layers:
                    return True
            # NOTE(review): get_layers_from_blob swallows exceptions internally
            # and returns an empty set, so this IOError branch looks
            # unreachable in practice — confirm before relying on it.
            except IOError:
                if self._blob_path_for_revision_is_missing(manifest):
                    # logger.warn is a deprecated alias (removed in py3.13)
                    logger.warning("Blob for digest %s does not exist. Deleting tag manifest: %s", manifest, other_tag)
                    tag_dir = os.path.join(self.registry_data_dir, "repositories", repo,
                                           "_manifests/tags", other_tag)
                    self._delete_dir(tag_dir)
                else:
                    raise
        return False

    def _manifest_in_same_repo(self, repo, tag, manifest):
        """check if manifest is found in other tags of same repository"""
        for other_tag in [t for t in self._get_tags(repo) if t != tag]:
            path = os.path.join(self.registry_data_dir, "repositories", repo,
                                "_manifests/tags", other_tag, "current/link")
            other_manifest = get_digest_from_blob(path)
            if other_manifest == manifest:
                return True

        return False

    def delete_entire_repository(self, repo):
        """delete all blobs for given repository repo"""
        logger.debug("Deleting entire repository '%s'", repo)
        repo_dir = os.path.join(self.registry_data_dir, "repositories", repo)
        if not os.path.isdir(repo_dir):
            raise RegistryCleanerError("No repository '{0}' found in repositories "
                                       "directory {1}/repositories".
                                       format(repo, self.registry_data_dir))
        links = set(get_links(repo_dir))
        all_links_but_current = set(self._get_all_links(except_repo=repo))
        for layer in links:
            if layer in all_links_but_current:
                # blob is shared with another repo; only this repo's links go
                logger.debug("Blob found in another repository. Not deleting: %s", layer)
            else:
                self._delete_blob(layer)
        self._delete_dir(repo_dir)

    def delete_repository_tag(self, repo, tag):
        """delete all blobs only for given tag of repository"""
        logger.debug("Deleting repository '%s' with tag '%s'", repo, tag)
        tag_dir = os.path.join(self.registry_data_dir, "repositories", repo, "_manifests/tags", tag)
        if not os.path.isdir(tag_dir):
            raise RegistryCleanerError("No repository '{0}' tag '{1}' found in repositories "
                                       "directory {2}/repositories".
                                       format(repo, tag, self.registry_data_dir))
        manifests_for_tag = set(get_links(tag_dir))
        revisions_to_delete = []
        blobs_to_keep = []
        layers = []
        all_links_not_in_current_repo = set(self._get_all_links(except_repo=repo))
        for manifest in manifests_for_tag:
            logger.debug("Looking up filesystem layers for manifest digest %s", manifest)

            if self._manifest_in_same_repo(repo, tag, manifest):
                logger.debug("Not deleting since we found another tag using manifest: %s", manifest)
                continue
            else:
                revisions_to_delete.append(
                    os.path.join(self.registry_data_dir, "repositories", repo,
                                 "_manifests/revisions/sha256", manifest)
                )
                if manifest in all_links_not_in_current_repo:
                    logger.debug("Not deleting the blob data since we found another repo using manifest: %s", manifest)
                    blobs_to_keep.append(manifest)

                layers.extend(self._get_layers_from_blob(manifest))

        layers_uniq = set(layers)
        for layer in layers_uniq:
            if self._layer_in_same_repo(repo, tag, layer):
                logger.debug("Not deleting since we found another tag using digest: %s", layer)
                continue

            # always remove this repo's link; remove the blob only when no
            # other repository references it
            self._delete_layer(repo, layer)
            if layer in all_links_not_in_current_repo:
                logger.debug("Blob found in another repository. Not deleting: %s", layer)
            else:
                self._delete_blob(layer)

        self._delete_revisions(repo, revisions_to_delete, blobs_to_keep)
        self._delete_dir(tag_dir)

    def delete_untagged(self, repo):
        """delete all untagged data from repo"""
        # message typo fixed: was "utagged"
        logger.debug("Deleting untagged data from repository '%s'", repo)
        repositories_dir = os.path.join(self.registry_data_dir, "repositories")
        repo_dir = os.path.join(repositories_dir, repo)
        if not os.path.isdir(repo_dir):
            raise RegistryCleanerError("No repository '{0}' found in repositories "
                                       "directory {1}/repositories".
                                       format(repo, self.registry_data_dir))
        # every layer reachable from a "current" tag link anywhere is protected
        tagged_links = set(get_links(repositories_dir, _filter="current"))
        layers_to_protect = []
        for link in tagged_links:
            layers_to_protect.extend(self._get_layers_from_blob(link))

        unique_layers_to_protect = set(layers_to_protect)
        for layer in unique_layers_to_protect:
            logger.debug("layer_to_protect: %s", layer)

        tagged_revisions = set(get_links(repo_dir, _filter="current"))

        revisions_to_delete = []
        layers_to_delete = []

        dir_for_revisions = os.path.join(repo_dir, "_manifests/revisions/sha256")
        for rev in os.listdir(dir_for_revisions):
            if rev not in tagged_revisions:
                revisions_to_delete.append(os.path.join(dir_for_revisions, rev))
                for layer in self._get_layers_from_blob(rev):
                    if layer not in unique_layers_to_protect:
                        layers_to_delete.append(layer)

        unique_layers_to_delete = set(layers_to_delete)

        self._delete_revisions(repo, revisions_to_delete)
        for layer in unique_layers_to_delete:
            self._delete_blob(layer)
            self._delete_layer(repo, layer)

    def get_tag_count(self, repo):
        """return the number of tags for *repo*, or -1 if its tags dir is missing"""
        logger.debug("Get tag count of repository '%s'", repo)
        repo_dir = os.path.join(self.registry_data_dir, "repositories", repo)
        tags_dir = os.path.join(repo_dir, "_manifests/tags")

        if os.path.isdir(tags_dir):
            tags = os.listdir(tags_dir)
            return len(tags)
        else:
            logger.info("Tags directory does not exist: '%s'", tags_dir)
            return -1
|
||||||
|
|
||||||
|
def main():
    """cli entrypoint: parse arguments, configure logging, run the cleaner.

    Exits with status 1 on RegistryCleanerError.
    """
    parser = argparse.ArgumentParser(description="Cleanup docker registry")
    parser.add_argument("-i", "--image",
                        dest="image",
                        required=True,
                        help="Docker image to cleanup")
    parser.add_argument("-v", "--verbose",
                        dest="verbose",
                        action="store_true",
                        help="verbose")
    parser.add_argument("-n", "--dry-run",
                        dest="dry_run",
                        action="store_true",
                        help="Dry run")
    parser.add_argument("-f", "--force",
                        dest="force",
                        action="store_true",
                        help="Force delete (deprecated)")
    parser.add_argument("-p", "--prune",
                        dest="prune",
                        action="store_true",
                        help="Prune")
    parser.add_argument("-u", "--untagged",
                        dest="untagged",
                        action="store_true",
                        help="Delete all untagged blobs for image")
    args = parser.parse_args()

    handler = logging.StreamHandler()
    handler.setFormatter(logging.Formatter(u'%(levelname)-8s [%(asctime)s] %(message)s'))
    logger.addHandler(handler)

    if args.verbose:
        logger.setLevel(logging.DEBUG)
    else:
        logger.setLevel(logging.INFO)

    # make sure not to log before logging is setup. that'll hose your logging config.
    if args.force:
        logger.info(
            "You supplied the force switch, which is deprecated. It has no effect now, and the script defaults to doing what used to be only happen when force was true")

    # "repo/image:tag" -> (image, tag); anything else is treated as untagged
    splitted = args.image.split(":")
    if len(splitted) == 2:
        image = splitted[0]
        tag = splitted[1]
    else:
        image = args.image
        tag = None

    # environ.get with a default replaces the manual 'in os.environ' check
    registry_data_dir = os.environ.get('REGISTRY_DATA_DIR',
                                       "/opt/registry_data/docker/registry/v2")

    try:
        cleaner = RegistryCleaner(registry_data_dir, dry_run=args.dry_run)
        if args.untagged:
            cleaner.delete_untagged(image)
        else:
            if tag:
                tag_count = cleaner.get_tag_count(image)
                # deleting the only tag would leave a broken repo, so drop it all
                if tag_count == 1:
                    cleaner.delete_entire_repository(image)
                else:
                    cleaner.delete_repository_tag(image, tag)
            else:
                cleaner.delete_entire_repository(image)

        if args.prune:
            cleaner.prune()
    except RegistryCleanerError as error:
        # logger.fatal is a deprecated alias for critical
        logger.critical(error)
        sys.exit(1)


if __name__ == "__main__":
    main()
|
38
docker-compose.yml
Normal file
38
docker-compose.yml
Normal file
|
@ -0,0 +1,38 @@
|
||||||
|
version: '3.8'
|
||||||
|
|
||||||
|
services:
|
||||||
|
registry-ui:
|
||||||
|
image: joxit/docker-registry-ui:main
|
||||||
|
restart: unless-stopped
|
||||||
|
ports:
|
||||||
|
- 5558:80
|
||||||
|
environment:
|
||||||
|
- SINGLE_REGISTRY=true
|
||||||
|
- REGISTRY_TITLE=Docker Registry UI
|
||||||
|
#- DELETE_IMAGES=true
|
||||||
|
- SHOW_CONTENT_DIGEST=true
|
||||||
|
- NGINX_PROXY_PASS_URL=http://192.168.1.4:5557
|
||||||
|
- SHOW_CATALOG_NB_TAGS=true
|
||||||
|
- CATALOG_MIN_BRANCHES=1
|
||||||
|
- CATALOG_MAX_BRANCHES=1
|
||||||
|
- TAGLIST_PAGE_SIZE=100
|
||||||
|
- REGISTRY_SECURED=false
|
||||||
|
- CATALOG_ELEMENTS_LIMIT=1000
|
||||||
|
container_name: registry-ui
|
||||||
|
|
||||||
|
registry-server:
|
||||||
|
image: registry:2.8.2
|
||||||
|
restart: unless-stopped
|
||||||
|
ports:
|
||||||
|
- 5557:5000
|
||||||
|
environment:
|
||||||
|
REGISTRY_HTTP_HEADERS_Access-Control-Allow-Origin: '[http://images-ui.patachina.casacam.net]'
|
||||||
|
REGISTRY_HTTP_HEADERS_Access-Control-Allow-Methods: '[HEAD,GET,OPTIONS,DELETE]'
|
||||||
|
REGISTRY_HTTP_HEADERS_Access-Control-Allow-Credentials: '[true]'
|
||||||
|
REGISTRY_HTTP_HEADERS_Access-Control-Allow-Headers: '[Authorization,Accept,Cache-Control]'
|
||||||
|
REGISTRY_HTTP_HEADERS_Access-Control-Expose-Headers: '[Docker-Content-Digest]'
|
||||||
|
#REGISTRY_STORAGE_DELETE_ENABLED: 'true'
|
||||||
|
volumes:
|
||||||
|
- /home/nvme/dockerdata/registry:/var/lib/registry
|
||||||
|
- /home/nvme/dockerdata/registry/config:/etc/docker/registry
|
||||||
|
container_name: registry-server
|
Loading…
Reference in a new issue