ref(Dockerfile): use upstream registry image
This commit removes the forked deis/distribution dependency now that the majority of its patches have
been applied upstream. The only remaining gap was bucket creation at boot, which is now handled by the
same `create-bucket` script used in deis/postgres.
Matthew Fisher committed Nov 17, 2016
1 parent 3e247fb commit 3af1129
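In effect, the container now starts in two steps: create the backing bucket, then run the stock registry. A rough shell equivalent of that sequence (illustrative only; main.go below drives it through os/exec, and the registry invocation shown uses the upstream image's default subcommand and config path rather than values taken from this repo):

# Illustrative startup sequence; not the literal entrypoint.
/bin/create-bucket                                 # provision the bucket/container if it does not exist
registry serve /etc/docker/registry/config.yml     # then launch the upstream registry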
Showing 4 changed files with 113 additions and 38 deletions.
1 change: 1 addition & 0 deletions contrib/ci/test.sh
@@ -6,5 +6,6 @@ JOB=$(docker run -d $1)
# let the registry run for a few seconds
sleep 5
# check that the registry is still up
+docker logs $JOB
docker ps -q --no-trunc=true | grep $JOB
docker rm -f $JOB
26 changes: 24 additions & 2 deletions main.go
@@ -29,31 +29,36 @@ func main() {
			log.Fatal(err)
		} else {
			os.Setenv("REGISTRY_STORAGE_GCS_BUCKET", string(bucket))
+			os.Setenv("BUCKET_NAME", string(bucket))
		}
	} else if storageType == "s3" {
		log.Println("INFO: using s3 as the backend")
		if accesskey, err := ioutil.ReadFile("/var/run/secrets/deis/registry/creds/accesskey"); err != nil {
			log.Fatal(err)
		} else {
			os.Setenv("REGISTRY_STORAGE_S3_ACCESSKEY", string(accesskey))
+			os.Setenv("AWS_ACCESS_KEY_ID", string(accesskey))
		}

		if secretkey, err := ioutil.ReadFile("/var/run/secrets/deis/registry/creds/secretkey"); err != nil {
			log.Fatal(err)
		} else {
			os.Setenv("REGISTRY_STORAGE_S3_SECRETKEY", string(secretkey))
+			os.Setenv("AWS_SECRET_ACCESS_KEY", string(secretkey))
		}

		if region, err := ioutil.ReadFile("/var/run/secrets/deis/registry/creds/region"); err != nil {
			log.Fatal(err)
		} else {
			os.Setenv("REGISTRY_STORAGE_S3_REGION", string(region))
+			os.Setenv("AWS_REGION", string(region))
		}

		if bucket, err := ioutil.ReadFile("/var/run/secrets/deis/registry/creds/registry-bucket"); err != nil {
			log.Fatal(err)
		} else {
			os.Setenv("REGISTRY_STORAGE_S3_BUCKET", string(bucket))
+			os.Setenv("BUCKET_NAME", string(bucket))
		}
	} else if storageType == "azure" {
		log.Println("INFO: using azure as the backend")
@@ -73,6 +78,7 @@ func main() {
			log.Fatal(err)
		} else {
			os.Setenv("REGISTRY_STORAGE_AZURE_CONTAINER", string(container))
+			os.Setenv("BUCKET_NAME", string(container))
		}

	} else if storageType == "minio" {
@@ -82,21 +88,29 @@ func main() {
		os.Setenv("REGISTRY_STORAGE", "s3")
		os.Setenv("REGISTRY_STORAGE_S3_BACKEND", "minio")
		os.Setenv("REGISTRY_STORAGE_S3_REGIONENDPOINT", fmt.Sprintf("http://%s:%s", mHost, mPort))
+		// NOTE(bacongobbler): custom envvars used in /bin/create-bucket
+		os.Setenv("S3_HOST", mHost)
+		os.Setenv("S3_PORT", mPort)
+		os.Setenv("S3_USE_SIGV4", "true")

		if accesskey, err := ioutil.ReadFile("/var/run/secrets/deis/registry/creds/accesskey"); err != nil {
			log.Fatal(err)
		} else {
			os.Setenv("REGISTRY_STORAGE_S3_ACCESSKEY", string(accesskey))
+			os.Setenv("AWS_ACCESS_KEY_ID", string(accesskey))
		}

		if secretkey, err := ioutil.ReadFile("/var/run/secrets/deis/registry/creds/secretkey"); err != nil {
			log.Fatal(err)
		} else {
			os.Setenv("REGISTRY_STORAGE_S3_SECRETKEY", string(secretkey))
+			os.Setenv("AWS_SECRET_ACCESS_KEY", string(secretkey))
		}

		os.Setenv("REGISTRY_STORAGE_S3_REGION", "us-east-1")
+		os.Setenv("AWS_REGION", "us-east-1")
		os.Setenv("REGISTRY_STORAGE_S3_BUCKET", "registry")
+		os.Setenv("BUCKET_NAME", "registry")

	} else if storageType == "swift" {
		log.Println("INFO: using swift as the backend")
@@ -138,11 +152,19 @@ func main() {

	}

-	cmd := exec.Command(registryBinary, command, registryConfig)
+	// run /bin/create-bucket
+	cmd := exec.Command("/bin/create-bucket")
	cmd.Stdout = os.Stdout
	cmd.Stderr = os.Stderr
	if err := cmd.Run(); err != nil {
-		log.Fatal("Error starting the registry", err)
+		log.Fatal("Error creating the registry bucket: ", err)
	}
+
+	cmd = exec.Command(registryBinary, command, registryConfig)
+	cmd.Stdout = os.Stdout
+	cmd.Stderr = os.Stderr
+	if err := cmd.Run(); err != nil {
+		log.Fatal("Error starting the registry: ", err)
+	}
	log.Println("INFO: registry started.")
}
55 changes: 19 additions & 36 deletions rootfs/Dockerfile
@@ -1,41 +1,24 @@
-FROM quay.io/deis/base:v0.3.5
+FROM registry:2.5.1

-COPY . /
+RUN apk add --no-cache \
+  python3 && \
+  python3 -m ensurepip && \
+  ln -sf /usr/bin/python3 /usr/bin/python && \
+  ln -sf /usr/bin/pip3 /usr/bin/pip
+
+RUN buildDeps='gcc git linux-headers musl-dev python3-dev' && \
+  apk add --no-cache $buildDeps && \
+  # "upgrade" boto to 2.43.0 + the patch to fix minio connections
+  pip install --disable-pip-version-check --no-cache-dir --upgrade \
+    git+https://github.com/deis/boto@88c980e56d1053892eb940d43a15a68af4ebb5e6 \
+    azure==1.0.3 \
+    gcloud==0.18.3 \
+    python-swiftclient==3.1.0 \
+    python-keystoneclient==3.1.0 && \
+  # purge dev dependencies
+  apk del $buildDeps

-RUN buildDeps='git golang make'; \
-  apt-get update && \
-  apt-get install -y --no-install-recommends \
-    $buildDeps && \
-  export DOCKER_REGISTRY_TAG=deis \
-    DOCKER_REGISTRY_REPO=https://github.com/deis/distribution.git \
-    DISTRIBUTION_DIR=/go/src/github.com/docker/distribution && \
-  export GOPATH=/go:$DISTRIBUTION_DIR/Godeps/_workspace && \
-  git clone -b $DOCKER_REGISTRY_TAG --single-branch $DOCKER_REGISTRY_REPO $DISTRIBUTION_DIR && \
-  cd $DISTRIBUTION_DIR && \
-  make binaries && \
-  cp bin/* /bin/ && \
-  rm -rf /go && \
-  # cleanup
-  apt-get purge -y --auto-remove $buildDeps && \
-  apt-get autoremove -y && \
-  apt-get clean -y && \
-  # package up license files if any by appending to existing tar
-  COPYRIGHT_TAR='/usr/share/copyrights.tar'; \
-  gunzip $COPYRIGHT_TAR.gz; tar -rf $COPYRIGHT_TAR /usr/share/doc/*/copyright; gzip $COPYRIGHT_TAR && \
-  rm -rf \
-    /usr/share/doc \
-    /usr/share/man \
-    /usr/share/info \
-    /usr/share/locale \
-    /var/lib/apt/lists/* \
-    /var/log/* \
-    /var/cache/debconf/* \
-    /etc/systemd \
-    /lib/lsb \
-    /lib/udev \
-    /usr/lib/x86_64-linux-gnu/gconv/IBM* \
-    /usr/lib/x86_64-linux-gnu/gconv/EBC* && \
-  bash -c "mkdir -p /usr/share/man/man{1..8}"
+COPY . /

VOLUME ["/var/lib/registry"]
CMD ["/opt/registry/sbin/registry"]
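With the image now based on the upstream registry, a quick local smoke test is to build from the rootfs/ context and point the CI script above at the result. A minimal sketch (the tag name is arbitrary, and the build context is assumed from the Dockerfile's location and its `COPY . /`):

# Build the image and run the repository's CI smoke test against it.
docker build -t deis/registry:dev rootfs/
./contrib/ci/test.sh deis/registry:dev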
69 changes: 69 additions & 0 deletions rootfs/bin/create-bucket
@@ -0,0 +1,69 @@
#!/usr/bin/env python

import os

import boto
import json
import swiftclient
from boto import config as botoconfig
from boto.s3.connection import S3Connection, OrdinaryCallingFormat
from oauth2client.service_account import ServiceAccountCredentials
from gcloud.storage.client import Client
from gcloud import exceptions
from azure.storage.blob import BlobService

def bucket_exists(conn, name):
    bucket = conn.lookup(name)
    if not bucket:
        return False
    return True

bucket_name = os.getenv('BUCKET_NAME')

if os.getenv('REGISTRY_STORAGE') == "s3" and os.getenv('REGISTRY_STORAGE_S3_BACKEND') != 'minio':
    conn = boto.connect_s3()

    if not bucket_exists(conn, bucket_name):
        conn.create_bucket(bucket_name)

elif os.getenv('REGISTRY_STORAGE') == "gcs":
    scopes = ['https://www.googleapis.com/auth/devstorage.full_control']
    credentials = ServiceAccountCredentials.from_json_keyfile_name(os.getenv('REGISTRY_STORAGE_GCS_KEYFILE'), scopes=scopes)
    with open(os.getenv('REGISTRY_STORAGE_GCS_KEYFILE')) as data_file:
        data = json.load(data_file)
    conn = Client(credentials=credentials, project=data['project_id'])
    try:
        conn.get_bucket(bucket_name)
    except exceptions.NotFound:
        conn.create_bucket(bucket_name)

elif os.getenv('REGISTRY_STORAGE') == "azure":
    conn = BlobService(account_name=os.getenv('REGISTRY_STORAGE_AZURE_ACCOUNTNAME'), account_key=os.getenv('REGISTRY_STORAGE_AZURE_ACCOUNTKEY'))
    # Azure doesn't throw an exception if the container exists by default
    # https://github.com/Azure/azure-storage-python/blob/master/azure/storage/blob/baseblobservice.py#L504
    conn.create_container(bucket_name)

elif os.getenv('REGISTRY_STORAGE') == "swift":
    conn = swiftclient.Connection(
        user=os.getenv('REGISTRY_STORAGE_SWIFT_USERNAME'),
        key=os.getenv('REGISTRY_STORAGE_SWIFT_PASSWORD'),
        authurl=os.getenv('REGISTRY_STORAGE_SWIFT_AUTHURL'),
        auth_version=os.getenv('REGISTRY_STORAGE_SWIFT_AUTHVERSION'),
        tenant_name=os.getenv('REGISTRY_STORAGE_SWIFT_TENANT')
    )
    # swift also does not throw exception if container already exists.
    conn.put_container(os.getenv('BUCKET_NAME'))

elif os.getenv('REGISTRY_STORAGE') == "s3" and os.getenv('REGISTRY_STORAGE_S3_BACKEND') == 'minio':
    botoconfig.add_section('s3')
    botoconfig.set('s3', 'use-sigv4', 'True')
    botoconfig.add_section('Boto')
    botoconfig.set('Boto', 'is_secure', 'False')
    conn = S3Connection(
        host=os.getenv('S3_HOST'),
        port=int(os.getenv('S3_PORT')),
        calling_format=OrdinaryCallingFormat())
    # HACK(bacongobbler): allow boto to connect to minio by changing the region name for s3v4 auth
    conn.auth_region_name = os.getenv('REGISTRY_STORAGE_S3_REGION')
    if not bucket_exists(conn, bucket_name):
        conn.create_bucket(bucket_name)
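The script reads everything from the environment, so it can also be exercised on its own by exporting the same variables the Go shim sets. A minimal sketch for the minio path (host, port, and credentials are placeholders, not values from this commit):

# Drive /bin/create-bucket the way main.go does for the minio backend (placeholder values).
export REGISTRY_STORAGE=s3
export REGISTRY_STORAGE_S3_BACKEND=minio
export REGISTRY_STORAGE_S3_REGION=us-east-1
export S3_USE_SIGV4=true
export S3_HOST=minio.example.svc       # assumed minio service host
export S3_PORT=9000                    # assumed minio port
export AWS_ACCESS_KEY_ID=changeme
export AWS_SECRET_ACCESS_KEY=changeme
export BUCKET_NAME=registry
/bin/create-bucket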
