ref(Dockerfile): use upstream registry image
This commit removes the forked deis/distribution dependency now that the majority of patches have been applied upstream. The only piece not applied upstream was bucket creation at boot, which is now handled by the same `create-bucket` script used in deis/postgres.
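For context, the `create-bucket` approach boils down to a look-up-then-create check against the configured backend, so repeated container boots stay idempotent. Below is a minimal sketch of that pattern for the plain S3 case, assuming boto picks up credentials from the environment; the full script added by this commit appears in the diff further down.

import os

import boto

# Create the bucket only when the lookup comes back empty, so the script can
# safely run on every boot.
conn = boto.connect_s3()
bucket_name = os.getenv('BUCKET_NAME')
if conn.lookup(bucket_name) is None:
    conn.create_bucket(bucket_name)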
Matthew Fisher committed Nov 17, 2016
1 parent 3e247fb · commit 3af1129
Showing 4 changed files with 113 additions and 38 deletions.
@@ -0,0 +1,69 @@
#!/usr/bin/env python
# create-bucket: ensure the bucket/container backing the registry exists at boot.

import os

import boto
import json
import swiftclient
from boto import config as botoconfig
from boto.s3.connection import S3Connection, OrdinaryCallingFormat
from oauth2client.service_account import ServiceAccountCredentials
from gcloud.storage.client import Client
from gcloud import exceptions
from azure.storage.blob import BlobService


def bucket_exists(conn, name):
    bucket = conn.lookup(name)
    if not bucket:
        return False
    return True

bucket_name = os.getenv('BUCKET_NAME')

if os.getenv('REGISTRY_STORAGE') == "s3" and os.getenv('REGISTRY_STORAGE_S3_BACKEND') != 'minio':
    # Plain Amazon S3: credentials come from boto's standard environment/config lookup.
    conn = boto.connect_s3()

    if not bucket_exists(conn, bucket_name):
        conn.create_bucket(bucket_name)

elif os.getenv('REGISTRY_STORAGE') == "gcs":
    # Google Cloud Storage: authenticate with the service account keyfile.
    scopes = ['https://www.googleapis.com/auth/devstorage.full_control']
    credentials = ServiceAccountCredentials.from_json_keyfile_name(os.getenv('REGISTRY_STORAGE_GCS_KEYFILE'), scopes=scopes)
    with open(os.getenv('REGISTRY_STORAGE_GCS_KEYFILE')) as data_file:
        data = json.load(data_file)
    conn = Client(credentials=credentials, project=data['project_id'])
    try:
        conn.get_bucket(bucket_name)
    except exceptions.NotFound:
        conn.create_bucket(bucket_name)

elif os.getenv('REGISTRY_STORAGE') == "azure":
    conn = BlobService(account_name=os.getenv('REGISTRY_STORAGE_AZURE_ACCOUNTNAME'), account_key=os.getenv('REGISTRY_STORAGE_AZURE_ACCOUNTKEY'))
    # By default, Azure doesn't throw an exception if the container already exists
    # https://github.com/Azure/azure-storage-python/blob/master/azure/storage/blob/baseblobservice.py#L504
    conn.create_container(bucket_name)

elif os.getenv('REGISTRY_STORAGE') == "swift":
    conn = swiftclient.Connection(
        user=os.getenv('REGISTRY_STORAGE_SWIFT_USERNAME'),
        key=os.getenv('REGISTRY_STORAGE_SWIFT_PASSWORD'),
        authurl=os.getenv('REGISTRY_STORAGE_SWIFT_AUTHURL'),
        auth_version=os.getenv('REGISTRY_STORAGE_SWIFT_AUTHVERSION'),
        tenant_name=os.getenv('REGISTRY_STORAGE_SWIFT_TENANT')
    )
    # swift also does not throw an exception if the container already exists.
    conn.put_container(os.getenv('BUCKET_NAME'))

elif os.getenv('REGISTRY_STORAGE') == "s3" and os.getenv('REGISTRY_STORAGE_S3_BACKEND') == 'minio':
    # Minio: force signature v4 over plain HTTP against the S3_HOST/S3_PORT endpoint.
    botoconfig.add_section('s3')
    botoconfig.set('s3', 'use-sigv4', 'True')
    botoconfig.add_section('Boto')
    botoconfig.set('Boto', 'is_secure', 'False')
    conn = S3Connection(
        host=os.getenv('S3_HOST'),
        port=int(os.getenv('S3_PORT')),
        calling_format=OrdinaryCallingFormat())
    # HACK(bacongobbler): allow boto to connect to minio by changing the region name for s3v4 auth
    conn.auth_region_name = os.getenv('REGISTRY_STORAGE_S3_REGION')
    if not bucket_exists(conn, bucket_name):
        conn.create_bucket(bucket_name)
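As a usage note, the script is configured entirely through environment variables, so the container entrypoint only has to export the backend settings before the registry starts. A hypothetical local run of the Minio branch might look like the sketch below; the values and the `./create_bucket` path are illustrative assumptions, not taken from this commit.

import os
import subprocess

# Illustrative settings only; real deployments receive these from the chart/secrets.
env = dict(os.environ,
           REGISTRY_STORAGE='s3',
           REGISTRY_STORAGE_S3_BACKEND='minio',
           REGISTRY_STORAGE_S3_REGION='us-east-1',
           S3_HOST='minio.example.local',
           S3_PORT='9000',
           BUCKET_NAME='registry',
           AWS_ACCESS_KEY_ID='changeme',
           AWS_SECRET_ACCESS_KEY='changeme')

# The script path is an assumption; adjust to wherever the image installs it.
subprocess.check_call(['./create_bucket'], env=env)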