Add Azure Blob Storage support #3959

Open · wants to merge 5 commits into master

AzureBlobUploader.java (new file, package org.dcm4chee.arc.storage.cloud)
@@ -0,0 +1,97 @@
/*
* *** BEGIN LICENSE BLOCK *****
* Version: MPL 1.1/GPL 2.0/LGPL 2.1
*
* The contents of this file are subject to the Mozilla Public License Version
* 1.1 (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
* http://www.mozilla.org/MPL/
*
* Software distributed under the License is distributed on an "AS IS" basis,
* WITHOUT WARRANTY OF ANY KIND, either express or implied. See the License
* for the specific language governing rights and limitations under the
* License.
*
* The Original Code is part of dcm4che, an implementation of DICOM(TM) in
* Java(TM), hosted at https://github.com/dcm4che.
*
* The Initial Developer of the Original Code is
* J4Care.
* Portions created by the Initial Developer are Copyright (C) 2015
* the Initial Developer. All Rights Reserved.
*
* Contributor(s):
* See @authors listed below
*
* Alternatively, the contents of this file may be used under the terms of
* either the GNU General Public License Version 2 or later (the "GPL"), or
* the GNU Lesser General Public License Version 2.1 or later (the "LGPL"),
* in which case the provisions of the GPL or the LGPL are applicable instead
* of those above. If you wish to allow use of your version of this file only
* under the terms of either the GPL or the LGPL, and not to allow others to
* use your version of this file under the terms of the MPL, indicate your
* decision by deleting the provisions above and replace them with the notice
* and other provisions required by the GPL or the LGPL. If you do not delete
* the provisions above, a recipient may use your version of this file under
* the terms of any one of the MPL, the GPL or the LGPL.
*
* *** END LICENSE BLOCK *****
*/

package org.dcm4chee.arc.storage.cloud;

import java.io.IOException;
import java.io.InputStream;
import java.util.ArrayList;
import java.util.List;

import org.dcm4chee.arc.storage.CacheInputStream;
import org.jclouds.blobstore.BlobStore;
import org.jclouds.blobstore.BlobStoreContext;
import org.jclouds.blobstore.domain.Blob;
import org.jclouds.blobstore.domain.MultipartPart;
import org.jclouds.blobstore.domain.MultipartUpload;
import org.jclouds.io.Payload;
import org.jclouds.io.payloads.InputStreamPayload;
import org.jclouds.blobstore.options.PutOptions;

/**
* @author Daniil Trishkin <kernel.pryanic@protonmail.com>
* @since Feb 2023
*/
class AzureBlobUploader extends CacheInputStream implements Uploader {
    @Override
    public void upload(BlobStoreContext context, InputStream in, long length, BlobStore blobStore,
            String container, String storagePath) throws IOException {
        // fillBuffers() reports whether the in-memory buffers were completely filled:
        // if so, the input may exceed the cache and is streamed as a multipart upload,
        // otherwise the buffered content is written with a single putBlob().
        if (fillBuffers(in))
            uploadMultipleParts(blobStore, in, container, storagePath);
        else
            uploadSinglePart(blobStore, container, storagePath);
    }

    private void uploadSinglePart(BlobStore blobStore, String container, String storagePath) {
        Blob blob = blobStore.blobBuilder(storagePath).payload(createPayload()).build();
        blobStore.putBlob(container, blob);
    }

    private Payload createPayload() {
        // Wrap the currently buffered data (this CacheInputStream) in a payload,
        // with the content length taken from the buffered byte count
        Payload payload = new InputStreamPayload(this);
        payload.getContentMetadata().setContentLength(Long.valueOf(available()));
        return payload;
    }

    private void uploadMultipleParts(BlobStore blobStore, InputStream in, String container, String storagePath)
            throws IOException {
        Blob blob = blobStore.blobBuilder(storagePath).build();
        MultipartUpload mpu = blobStore.initiateMultipartUpload(
                container, blob.getMetadata(), new PutOptions().multipart());
        List<MultipartPart> parts = new ArrayList<>();
        int partNumber = 1;
        do {
            // upload the buffered chunk as the next part, then refill the buffers from the input
            parts.add(blobStore.uploadMultipartPart(mpu, partNumber, createPayload()));
            partNumber++;
        } while (fillBuffers(in));
        // flush any remaining buffered bytes as a final part before completing the upload
        if (available() > 0)
            parts.add(blobStore.uploadMultipartPart(mpu, partNumber, createPayload()));
        blobStore.completeMultipartUpload(mpu, parts);
    }
}
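
The uploader relies on CacheInputStream to buffer the incoming object in memory and to fall back to a multipart (block) upload once the buffers are exhausted. Below is a hedged usage sketch, not part of this PR: the account name, key, container, object path and wrapper class are placeholders, and it assumes the jclouds "azureblob" provider is on the classpath and that the snippet lives in the same package as the package-private AzureBlobUploader.

package org.dcm4chee.arc.storage.cloud;

import java.io.InputStream;
import java.nio.file.Files;
import java.nio.file.Paths;

import org.jclouds.ContextBuilder;
import org.jclouds.blobstore.BlobStore;
import org.jclouds.blobstore.BlobStoreContext;

class AzureBlobUploaderSketch {
    public static void main(String[] args) throws Exception {
        // Placeholder account name and key - replace with real Azure Storage credentials
        BlobStoreContext ctx = ContextBuilder.newBuilder("azureblob")
                .credentials("myaccount", "<base64-account-key>")
                .buildView(BlobStoreContext.class);
        BlobStore blobStore = ctx.getBlobStore();
        try (InputStream in = Files.newInputStream(Paths.get("instance.dcm"))) {
            // length is not used by AzureBlobUploader, so -1 (unknown) is acceptable here
            new AzureBlobUploader().upload(ctx, in, -1L, blobStore, "dcm4chee-arc", "study/series/instance");
        } finally {
            ctx.close();
        }
    }
}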
CloudStorage.java
@@ -43,7 +43,6 @@
 import com.google.common.hash.HashCode;
 import org.dcm4che3.net.Device;
 import org.dcm4che3.util.AttributesFormat;
-import org.dcm4chee.arc.conf.BinaryPrefix;
 import org.dcm4chee.arc.conf.StorageDescriptor;
 import org.dcm4chee.arc.metrics.MetricsService;
 import org.dcm4chee.arc.storage.AbstractStorage;
@@ -94,8 +93,7 @@ public void upload(BlobStoreContext context, InputStream in, long length,
     private final AttributesFormat pathFormat;
     private final String container;
     private final BlobStoreContext context;
-    private final boolean streamingUpload;
-    private final long maxPartSize;
+    private String api;
     private int count;

     @Override
@@ -109,15 +107,13 @@ protected CloudStorage(StorageDescriptor descriptor, MetricsService metricsServi
         pathFormat = new AttributesFormat(descriptor.getProperty("pathFormat", DEFAULT_PATH_FORMAT));
         container = descriptor.getProperty("container", DEFAULT_CONTAINER);
         if (Boolean.parseBoolean(descriptor.getProperty("containerExists", null))) count++;
-        String api = descriptor.getStorageURI().getSchemeSpecificPart();
+        api = descriptor.getStorageURI().getSchemeSpecificPart();
         String endpoint = null;
         int endApi = api.indexOf(':');
         if (endApi != -1) {
             endpoint = api.substring(endApi + 1);
             api = api.substring(0, endApi);
         }
-        this.streamingUpload = Boolean.parseBoolean(descriptor.getProperty("streamingUpload", null));
-        this.maxPartSize = BinaryPrefix.parse(descriptor.getProperty("maxPartSize", "5G"));
         ContextBuilder ctxBuilder = ContextBuilder.newBuilder(api);
         String identity = descriptor.getProperty("identity", null);
         if (identity != null)
@@ -200,8 +196,16 @@ private void upload(InputStream in, WriteContext ctx) throws IOException {
                     .concat(String.format("%08X", ThreadLocalRandom.current().nextInt()));
         }
         long length = ctx.getContentLength();
-        Uploader uploader = streamingUpload || length >= 0 && length <= maxPartSize
-                ? STREAMING_UPLOADER : new S3Uploader();
+        Uploader uploader = STREAMING_UPLOADER;
+        switch (api) {
+            case "aws-s3":
+                uploader = new S3Uploader();
+                break;
+            case "azureblob":
+                uploader = new AzureBlobUploader();
+                break;
+        }
+
         uploader.upload(context, in, length, blobStore, container, storagePath);
         ctx.setStoragePath(storagePath);