diff --git a/geowebcache/azureblob/pom.xml b/geowebcache/azureblob/pom.xml index 88538c828..65cce89fe 100644 --- a/geowebcache/azureblob/pom.xml +++ b/geowebcache/azureblob/pom.xml @@ -19,10 +19,17 @@ - com.microsoft.azure + com.azure azure-storage-blob - - 11.0.0 + + + com.azure + azure-storage-blob-batch + + + + com.azure + azure-identity diff --git a/geowebcache/azureblob/src/main/java/org/geowebcache/azure/AzureBlobStore.java b/geowebcache/azureblob/src/main/java/org/geowebcache/azure/AzureBlobStore.java index abde0fabc..a3a08f8ec 100644 --- a/geowebcache/azureblob/src/main/java/org/geowebcache/azure/AzureBlobStore.java +++ b/geowebcache/azureblob/src/main/java/org/geowebcache/azure/AzureBlobStore.java @@ -17,20 +17,18 @@ import static com.google.common.base.Preconditions.checkNotNull; import static java.util.Objects.isNull; +import com.azure.core.util.BinaryData; +import com.azure.storage.blob.models.BlobDownloadContentResponse; +import com.azure.storage.blob.models.BlobItem; +import com.azure.storage.blob.models.BlobProperties; +import com.azure.storage.blob.models.BlobStorageException; +import com.azure.storage.blob.specialized.BlockBlobClient; import com.google.common.collect.AbstractIterator; import com.google.common.collect.Iterators; -import com.microsoft.azure.storage.blob.BlockBlobURL; -import com.microsoft.azure.storage.blob.DownloadResponse; -import com.microsoft.azure.storage.blob.models.BlobGetPropertiesResponse; -import com.microsoft.azure.storage.blob.models.BlobHTTPHeaders; -import com.microsoft.azure.storage.blob.models.BlobItem; -import com.microsoft.rest.v2.RestException; -import com.microsoft.rest.v2.util.FlowableUtil; -import io.reactivex.Flowable; import java.io.IOException; import java.io.InputStream; -import java.nio.ByteBuffer; -import java.util.HashMap; +import java.io.UncheckedIOException; +import java.time.OffsetDateTime; import java.util.Iterator; import java.util.List; import java.util.Map; @@ -39,8 +37,8 @@ import java.util.concurrent.Callable; import java.util.logging.Logger; import java.util.stream.Collectors; +import java.util.stream.Stream; import javax.annotation.Nullable; -import org.apache.commons.io.IOUtils; import org.geotools.util.logging.Logging; import org.geowebcache.filter.parameters.ParametersUtils; import org.geowebcache.io.ByteArrayResource; @@ -80,8 +78,9 @@ public AzureBlobStore( this.keyBuilder = new TMSKeyBuilder(prefix, layers); // check target is suitable for a cache - boolean emptyFolder = client.listBlobs(prefix, 1).isEmpty(); - boolean existingMetadata = !client.listBlobs(keyBuilder.storeMetadata(), 1).isEmpty(); + boolean emptyFolder = !client.prefixExists(prefix); + boolean existingMetadata = client.blobExists(keyBuilder.storeMetadata()); + CompositeBlobStore.checkSuitability( configuration.getLocation(), existingMetadata, emptyFolder); @@ -103,13 +102,7 @@ public boolean delete(String layerName) throws StorageException { final String layerPrefix = keyBuilder.forLayer(layerName); // this might not be there, tolerant delete - try { - BlockBlobURL metadata = client.getBlockBlobURL(metadataKey); - int statusCode = metadata.delete().blockingGet().statusCode(); - if (!HttpStatus.valueOf(statusCode).is2xxSuccessful()) { - return false; - } - } catch (RestException e) { + if (!client.deleteBlob(metadataKey)) { return false; } @@ -133,7 +126,7 @@ public boolean deleteByParametersId(String layerName, String parametersId) try { return deleteManager.scheduleAsyncDelete(prefix); } catch (StorageException e) { - throw new 
RuntimeException(e); + throw new UncheckedIOException(e); } }) .reduce(Boolean::logicalOr) // Don't use Stream.anyMatch as it would short @@ -164,44 +157,45 @@ public boolean deleteByGridsetId(final String layerName, final String gridSetId) @Override public boolean delete(TileObject obj) throws StorageException { final String key = keyBuilder.forTile(obj); - BlockBlobURL blob = client.getBlockBlobURL(key); + BlockBlobClient blob = client.getBlockBlobClient(key); // don't bother for the extra call if there are no listeners if (listeners.isEmpty()) { try { - int statusCode = blob.delete().blockingGet().statusCode(); - return HttpStatus.valueOf(statusCode).is2xxSuccessful(); - } catch (RestException e) { - return false; + return blob.deleteIfExists(); + } catch (RuntimeException e) { + throw new StorageException("Failed to delete tile ", e); } } + // if there are listeners, gather extra information + long oldSize = 0; try { - // if there are listeners, gather extra information - BlobGetPropertiesResponse properties = blob.getProperties().blockingGet(); - Long oldSize = properties.headers().contentLength(); - int statusCode = blob.delete().blockingGet().statusCode(); - if (!HttpStatus.valueOf(statusCode).is2xxSuccessful()) { - return false; + BlobProperties properties = blob.getProperties(); + oldSize = properties.getBlobSize(); + } catch (BlobStorageException e) { + if (HttpStatus.NOT_FOUND.value() != e.getStatusCode()) { + throw new StorageException("Failed to check if the container exists", e); } - if (oldSize != null) { - obj.setBlobSize(oldSize.intValue()); - } - } catch (RestException e) { - if (e.response().statusCode() != 404) { - throw new StorageException("Failed to delete tile ", e); + } + + try { + boolean deleted = blob.deleteIfExists(); + if (deleted && oldSize > 0L) { + obj.setBlobSize((int) oldSize); + listeners.sendTileDeleted(obj); } - return false; + return deleted; + } catch (RuntimeException e) { + throw new StorageException("Failed to delete tile ", e); } - listeners.sendTileDeleted(obj); - return true; } @Override public boolean delete(TileRange tileRange) throws StorageException { // see if there is anything to delete in that range by computing a prefix final String coordsPrefix = keyBuilder.coordinatesPrefix(tileRange, false); - if (client.listBlobs(coordsPrefix, 1).isEmpty()) { + if (!client.prefixExists(coordsPrefix)) { return false; } @@ -273,24 +267,26 @@ protected long[] computeNext() { @Override public boolean get(TileObject obj) throws StorageException { final String key = keyBuilder.forTile(obj); - final BlockBlobURL blob = client.getBlockBlobURL(key); + boolean found; try { - DownloadResponse response = blob.download().blockingGet(); - ByteBuffer buffer = - FlowableUtil.collectBytesInBuffer(response.body(null)).blockingGet(); - byte[] bytes = new byte[buffer.remaining()]; - buffer.get(bytes); - - obj.setBlobSize(bytes.length); - obj.setBlob(new ByteArrayResource(bytes)); - obj.setCreated(response.headers().lastModified().toEpochSecond() * 1000l); - } catch (RestException e) { - if (e.response().statusCode() == 404) { - return false; + BlobDownloadContentResponse response = client.download(key); + if (null == response) { + obj.setBlob(null); + obj.setBlobSize(0); + found = false; + } else { + BinaryData data = response.getValue(); + OffsetDateTime lastModified = response.getDeserializedHeaders().getLastModified(); + byte[] bytes = data.toBytes(); + obj.setBlobSize(bytes.length); + obj.setBlob(new ByteArrayResource(bytes)); + 
obj.setCreated(lastModified.toEpochSecond() * 1000l); + found = true; } + } catch (BlobStorageException e) { throw new StorageException("Error getting " + key, e); } - return true; + return found; } @Override @@ -301,50 +297,40 @@ public void put(TileObject obj) throws StorageException { final String key = keyBuilder.forTile(obj); - BlockBlobURL blobURL = client.getBlockBlobURL(key); + BlockBlobClient blobURL = client.getBlockBlobClient(key); // if there are listeners, gather first the old size with a "head" request - Long oldSize = null; + long oldSize = 0L; boolean existed = false; if (!listeners.isEmpty()) { try { - BlobGetPropertiesResponse properties = blobURL.getProperties().blockingGet(); - oldSize = properties.headers().contentLength(); + BlobProperties properties = blobURL.getProperties(); + oldSize = properties.getBlobSize(); existed = true; - } catch (RestException e) { - if (e.response().statusCode() != HttpStatus.NOT_FOUND.value()) { + } catch (BlobStorageException e) { + if (HttpStatus.NOT_FOUND.value() != e.getStatusCode()) { throw new StorageException("Failed to check if the container exists", e); } } } // then upload + String mimeType = getMimeType(obj); try (InputStream is = blob.getInputStream()) { - byte[] bytes = IOUtils.toByteArray(is); - ByteBuffer buffer = ByteBuffer.wrap(bytes); - String mimeType = MimeType.createFromFormat(obj.getBlobFormat()).getMimeType(); - BlobHTTPHeaders headers = new BlobHTTPHeaders().withBlobContentType(mimeType); - int status = - blobURL.upload(Flowable.just(buffer), bytes.length, headers, null, null, null) - .blockingGet() - .statusCode(); - if (!HttpStatus.valueOf(status).is2xxSuccessful()) { - throw new StorageException( - "Failed to upload tile to Azure on container " - + client.getContainerName() - + " and key " - + key - + " got HTTP status " - + status); - } - } catch (RestException | IOException | MimeException e) { + Long length = blob.getSize(); + BinaryData data = BinaryData.fromStream(is, length); + client.upload(key, data, mimeType); + } catch (StorageException e) { throw new StorageException( "Failed to upload tile to Azure on container " + client.getContainerName() + " and key " + key, e); + } catch (IOException e) { + throw new StorageException("Error obtaining date from TileObject " + obj); } + // along with the metadata putParametersMetadata(obj.getLayerName(), obj.getParametersId(), obj.getParameters()); @@ -358,6 +344,16 @@ public void put(TileObject obj) throws StorageException { } } + private String getMimeType(TileObject obj) { + String mimeType; + try { + mimeType = MimeType.createFromFormat(obj.getBlobFormat()).getMimeType(); + } catch (MimeException e) { + throw new IllegalArgumentException(e); + } + return mimeType; + } + private void putParametersMetadata( String layerName, String parametersId, Map parameters) { assert (isNull(parametersId) == isNull(parameters)); @@ -370,7 +366,7 @@ private void putParametersMetadata( try { client.putProperties(resourceKey, properties); } catch (StorageException e) { - throw new RuntimeException(e); + throw new UncheckedIOException(e); } } @@ -384,9 +380,6 @@ public void clear() throws StorageException { @Override public void destroy() { shutDown = true; - if (client != null) { - client.close(); - } if (deleteManager != null) { deleteManager.close(); } @@ -404,8 +397,11 @@ public boolean removeListener(BlobStoreListener listener) { @Override public boolean rename(String oldLayerName, String newLayerName) throws StorageException { + // revisit: this seems to hold true only for 
GeoServerTileLayer, "standalone" TileLayers + // return getName() from getId(), as in AbstractTileLayer. Unfortunately the only option + // for non-GeoServerTileLayers would be copy and delete. Expensive. log.fine("No need to rename layers, AzureBlobStore uses layer id as key root"); - if (!client.listBlobs(oldLayerName, 1).isEmpty()) { + if (client.prefixExists(oldLayerName)) { listeners.sendLayerRenamed(oldLayerName, newLayerName); } return true; @@ -415,8 +411,7 @@ public boolean rename(String oldLayerName, String newLayerName) throws StorageEx @Override public String getLayerMetadata(String layerName, String key) { Properties properties = getLayerMetadata(layerName); - String value = properties.getProperty(key); - return value; + return properties.getProperty(key); } @Override @@ -427,45 +422,35 @@ public void putLayerMetadata(String layerName, String key, String value) { try { client.putProperties(resourceKey, properties); } catch (StorageException e) { - throw new RuntimeException(e); + throw new UncheckedIOException(e); } } private Properties getLayerMetadata(String layerName) { String key = keyBuilder.layerMetadata(layerName); - try { - return client.getProperties(key); - } catch (StorageException e) { - throw new RuntimeException(e); - } + return client.getProperties(key); } @Override public boolean layerExists(String layerName) { - final String coordsPrefix = keyBuilder.forLayer(layerName); - return !client.listBlobs(coordsPrefix, 1).isEmpty(); + final String layerPrefix = keyBuilder.forLayer(layerName); + return client.prefixExists(layerPrefix); } @Override public Map>> getParametersMapping(String layerName) { - // going big, with MAX_VALUE, since at the end everything must be held in memory anyways - List items = - client.listBlobs(keyBuilder.parametersMetadataPrefix(layerName), Integer.MAX_VALUE); - Map>> result = new HashMap<>(); - try { - for (BlobItem item : items) { - - Map properties = - client.getProperties(item.name()).entrySet().stream() - .collect( - Collectors.toMap( - e -> (String) e.getKey(), - e -> (String) e.getValue())); - result.put(ParametersUtils.getId(properties), Optional.of(properties)); - } - return result; - } catch (StorageException e) { - throw new RuntimeException("Failed to retrieve properties mappings", e); - } + // going big, retrieve all items, since at the end everything must be held in memory anyways + String parametersMetadataPrefix = keyBuilder.parametersMetadataPrefix(layerName); + Stream items = client.listBlobs(parametersMetadataPrefix); + + return items.map(BlobItem::getName) + .map(this::loadProperties) + .collect(Collectors.toMap(ParametersUtils::getId, Optional::ofNullable)); + } + + private Map loadProperties(String blobKey) { + Properties properties = client.getProperties(blobKey); + return properties.entrySet().stream() + .collect(Collectors.toMap(e -> (String) e.getKey(), e -> (String) e.getValue())); } } diff --git a/geowebcache/azureblob/src/main/java/org/geowebcache/azure/AzureBlobStoreData.java b/geowebcache/azureblob/src/main/java/org/geowebcache/azure/AzureBlobStoreData.java index 0bd12e95a..db30d793c 100644 --- a/geowebcache/azureblob/src/main/java/org/geowebcache/azure/AzureBlobStoreData.java +++ b/geowebcache/azureblob/src/main/java/org/geowebcache/azure/AzureBlobStoreData.java @@ -29,7 +29,7 @@ public class AzureBlobStoreData { private String accountName; private String accountKey; private Integer maxConnections; - private Boolean useHTTPS; + private boolean useHTTPS; private String proxyHost; private Integer proxyPort; 
private String proxyUsername; @@ -113,11 +113,11 @@ public void setMaxConnections(Integer maxConnections) { this.maxConnections = maxConnections; } - public Boolean isUseHTTPS() { + public boolean isUseHTTPS() { return useHTTPS; } - public void setUseHTTPS(Boolean useHTTPS) { + public void setUseHTTPS(boolean useHTTPS) { this.useHTTPS = useHTTPS; } @@ -137,6 +137,7 @@ public void setProxyPort(Integer proxyPort) { this.proxyPort = proxyPort; } + /** unused */ public String getProxyUsername() { return proxyUsername; } @@ -145,6 +146,7 @@ public void setProxyUsername(String proxyUsername) { this.proxyUsername = proxyUsername; } + /** unused */ public String getProxyPassword() { return proxyPassword; } diff --git a/geowebcache/azureblob/src/main/java/org/geowebcache/azure/AzureBlobStoreInfo.java b/geowebcache/azureblob/src/main/java/org/geowebcache/azure/AzureBlobStoreInfo.java index 1812863e0..dcbadc6b3 100644 --- a/geowebcache/azureblob/src/main/java/org/geowebcache/azure/AzureBlobStoreInfo.java +++ b/geowebcache/azureblob/src/main/java/org/geowebcache/azure/AzureBlobStoreInfo.java @@ -52,7 +52,7 @@ public class AzureBlobStoreInfo extends BlobStoreInfo { private String maxConnections; - private Boolean useHTTPS = true; + private boolean useHTTPS = true; private String proxyHost; @@ -139,7 +139,7 @@ public Boolean isUseHTTPS() { } /** @param useHTTPS whether to use HTTPS (true) or HTTP (false) when talking to Azure */ - public void setUseHTTPS(Boolean useHTTPS) { + public void setUseHTTPS(boolean useHTTPS) { this.useHTTPS = useHTTPS; } @@ -260,7 +260,7 @@ public int hashCode() { result = prime * result + ((proxyPort == null) ? 0 : proxyPort.hashCode()); result = prime * result + ((proxyUsername == null) ? 0 : proxyUsername.hashCode()); result = prime * result + ((serviceURL == null) ? 0 : serviceURL.hashCode()); - result = prime * result + ((useHTTPS == null) ? 0 : useHTTPS.hashCode()); + result = prime * result + (useHTTPS ? 
1 : 0); return result; } @@ -300,9 +300,7 @@ public boolean equals(Object obj) { if (serviceURL == null) { if (other.serviceURL != null) return false; } else if (!serviceURL.equals(other.serviceURL)) return false; - if (useHTTPS == null) { - if (other.useHTTPS != null) return false; - } else if (!useHTTPS.equals(other.useHTTPS)) return false; + if (useHTTPS != other.useHTTPS) return false; return true; } diff --git a/geowebcache/azureblob/src/main/java/org/geowebcache/azure/AzureClient.java b/geowebcache/azureblob/src/main/java/org/geowebcache/azure/AzureClient.java index c56deab32..01576faec 100644 --- a/geowebcache/azureblob/src/main/java/org/geowebcache/azure/AzureClient.java +++ b/geowebcache/azureblob/src/main/java/org/geowebcache/azure/AzureClient.java @@ -14,122 +14,132 @@ */ package org.geowebcache.azure; -import com.microsoft.azure.storage.blob.BlockBlobURL; -import com.microsoft.azure.storage.blob.ContainerURL; -import com.microsoft.azure.storage.blob.DownloadResponse; -import com.microsoft.azure.storage.blob.ListBlobsOptions; -import com.microsoft.azure.storage.blob.PipelineOptions; -import com.microsoft.azure.storage.blob.ServiceURL; -import com.microsoft.azure.storage.blob.SharedKeyCredentials; -import com.microsoft.azure.storage.blob.StorageURL; -import com.microsoft.azure.storage.blob.models.BlobFlatListSegment; -import com.microsoft.azure.storage.blob.models.BlobHTTPHeaders; -import com.microsoft.azure.storage.blob.models.BlobItem; -import com.microsoft.azure.storage.blob.models.ContainerListBlobFlatSegmentResponse; -import com.microsoft.rest.v2.RestException; -import com.microsoft.rest.v2.http.HttpClient; -import com.microsoft.rest.v2.http.HttpClientConfiguration; -import com.microsoft.rest.v2.http.NettyClient; -import com.microsoft.rest.v2.http.SharedChannelPoolOptions; -import com.microsoft.rest.v2.util.FlowableUtil; -import io.netty.bootstrap.Bootstrap; -import io.reactivex.Flowable; +import com.azure.core.credential.AzureNamedKeyCredential; +import com.azure.core.http.HttpClient; +import com.azure.core.http.ProxyOptions; +import com.azure.core.http.ProxyOptions.Type; +import com.azure.core.http.rest.PagedIterable; +import com.azure.core.http.rest.Response; +import com.azure.core.util.BinaryData; +import com.azure.core.util.ClientOptions; +import com.azure.core.util.Context; +import com.azure.core.util.HttpClientOptions; +import com.azure.storage.blob.BlobClient; +import com.azure.storage.blob.BlobContainerClient; +import com.azure.storage.blob.BlobServiceClient; +import com.azure.storage.blob.BlobServiceClientBuilder; +import com.azure.storage.blob.models.BlobDownloadContentResponse; +import com.azure.storage.blob.models.BlobHttpHeaders; +import com.azure.storage.blob.models.BlobItem; +import com.azure.storage.blob.models.BlobRequestConditions; +import com.azure.storage.blob.models.BlobStorageException; +import com.azure.storage.blob.models.BlockBlobItem; +import com.azure.storage.blob.models.DownloadRetryOptions; +import com.azure.storage.blob.models.ListBlobsOptions; +import com.azure.storage.blob.options.BlockBlobSimpleUploadOptions; +import com.azure.storage.blob.specialized.BlockBlobClient; import java.io.ByteArrayInputStream; import java.io.ByteArrayOutputStream; -import java.io.Closeable; import java.io.IOException; import java.io.InputStreamReader; +import java.io.UncheckedIOException; +import java.net.InetSocketAddress; import java.net.Proxy; -import java.nio.ByteBuffer; import java.nio.charset.StandardCharsets; -import java.util.ArrayList; -import 
java.util.List; +import java.time.Duration; import java.util.Properties; +import java.util.stream.Stream; import javax.annotation.Nullable; import org.geowebcache.storage.StorageException; -import org.geowebcache.util.URLs; import org.springframework.http.HttpStatus; -public class AzureClient implements Closeable { +public class AzureClient { - private final NettyClient.Factory factory; private AzureBlobStoreData configuration; - private final ContainerURL container; + private final BlobContainerClient container; public AzureClient(AzureBlobStoreData configuration) throws StorageException { this.configuration = configuration; - try { - SharedKeyCredentials creds = - new SharedKeyCredentials( - configuration.getAccountName(), configuration.getAccountKey()); - - // setup the HTTPClient, keep the factory on the side to close it down on destroy - factory = - new NettyClient.Factory( - new Bootstrap(), - 0, - new SharedChannelPoolOptions() - .withPoolSize(configuration.getMaxConnections()), - null); - final HttpClient client; - Proxy proxy = configuration.getProxy(); - // not clear how to use credentials for proxy, - // https://github.com/Azure/autorest-clientruntime-for-java/issues/624 - if (proxy != null) { - HttpClientConfiguration clientConfiguration = new HttpClientConfiguration(proxy); - client = factory.create(clientConfiguration); - } else { - client = factory.create(null); - } - // build the container access - PipelineOptions options = new PipelineOptions().withClient(client); - ServiceURL serviceURL = - new ServiceURL( - URLs.of(getServiceURL(configuration)), - StorageURL.createPipeline(creds, options)); + try { + BlobServiceClient serviceClient = createBlobServiceClient(configuration); String containerName = configuration.getContainer(); - this.container = serviceURL.createContainerURL(containerName); - // no way to see if the containerURL already exists, try to create and see if - // we get a 409 CONFLICT - int status; - try { - status = this.container.getProperties().blockingGet().statusCode(); - } catch (com.microsoft.azure.storage.blob.StorageException se) { - status = se.statusCode(); - } - try { - if (status == HttpStatus.NOT_FOUND.value()) { - status = this.container.create(null, null, null).blockingGet().statusCode(); - if (!HttpStatus.valueOf(status).is2xxSuccessful() - && status != HttpStatus.CONFLICT.value()) { - throw new StorageException( - "Failed to create container " - + containerName - + ", REST API returned a " - + status); - } - } - } catch (RestException e) { - if (e.response().statusCode() != HttpStatus.CONFLICT.value()) { - throw new StorageException("Failed to create container", e); - } - } - } catch (Exception e) { + this.container = getOrCreateContainer(serviceClient, containerName); + } catch (StorageException e) { + throw e; + } catch (RuntimeException e) { throw new StorageException("Failed to setup Azure connection and container", e); } } - public String getServiceURL(AzureBlobStoreData configuration) { + BlobContainerClient getOrCreateContainer(BlobServiceClient serviceClient, String containerName) + throws StorageException { + + BlobContainerClient container = serviceClient.getBlobContainerClient(containerName); + if (!container.exists()) { + // use createIfNotExists() instead of create() in case multiple instances are + // starting up at the same time + boolean created = container.createIfNotExists(); + if (!created && !container.exists()) { + throw new StorageException("Failed to create container " + containerName); + } + } + return container; + } + + 
BlobServiceClient createBlobServiceClient(AzureBlobStoreData configuration) { + String serviceURL = getServiceURL(configuration); + AzureNamedKeyCredential creds = getCredentials(configuration); + ClientOptions clientOpts = new ClientOptions(); + HttpClient httpClient = createHttpClient(configuration); + return new BlobServiceClientBuilder() + .endpoint(serviceURL) + .credential(creds) + .clientOptions(clientOpts) + .httpClient(httpClient) + .buildClient(); + } + + AzureNamedKeyCredential getCredentials(AzureBlobStoreData configuration) { + String accountName = configuration.getAccountName(); + String accountKey = configuration.getAccountKey(); + return new AzureNamedKeyCredential(accountName, accountKey); + } + + HttpClient createHttpClient(AzureBlobStoreData blobStoreConfig) { + + @Nullable Integer maxConnections = blobStoreConfig.getMaxConnections(); + @Nullable ProxyOptions proxyOptions = getProxyOptions(blobStoreConfig); + + HttpClientOptions opts = new HttpClientOptions(); + opts.setProxyOptions(proxyOptions); + opts.setMaximumConnectionPoolSize(maxConnections); + return HttpClient.createDefault(opts); + } + + ProxyOptions getProxyOptions(AzureBlobStoreData blobStoreConfig) { + ProxyOptions proxyOptions = null; + Proxy proxy = blobStoreConfig.getProxy(); + if (null != proxy) { + ProxyOptions.Type type = Type.HTTP; + InetSocketAddress address = (InetSocketAddress) proxy.address(); + String proxyUsername = blobStoreConfig.getProxyUsername(); + String proxyPassword = blobStoreConfig.getProxyPassword(); + + proxyOptions = new ProxyOptions(type, address); + proxyOptions.setCredentials(proxyUsername, proxyPassword); + } + return proxyOptions; + } + + String getServiceURL(AzureBlobStoreData configuration) { String serviceURL = configuration.getServiceURL(); if (serviceURL == null) { // default to account name based location - serviceURL = - (configuration.isUseHTTPS() ? "https" : "http") - + "://" - + configuration.getAccountName() - + ".blob.core.windows.net"; + String proto = configuration.isUseHTTPS() ? 
"https" : "http"; + String account = configuration.getAccountName(); + serviceURL = String.format("%s://%s.blob.core.windows.net", proto, account); } return serviceURL; } @@ -138,30 +148,41 @@ public String getServiceURL(AzureBlobStoreData configuration) { * Returns a blob for the given key (may not exist yet, and in need of being created) * * @param key The blob key + * @return */ - public BlockBlobURL getBlockBlobURL(String key) { - return container.createBlockBlobURL(key); + public BlockBlobClient getBlockBlobClient(String key) { + BlobClient blobClient = container.getBlobClient(key); + return blobClient.getBlockBlobClient(); } - @Nullable - public byte[] getBytes(String key) throws StorageException { - BlockBlobURL blob = getBlockBlobURL(key); + /** + * @return the blob's download response, or {@code null} if not found + * @throws BlobStorageException + */ + public BlobDownloadContentResponse download(String key) { + BlobClient blobClient = container.getBlobClient(key); + DownloadRetryOptions options = new DownloadRetryOptions().setMaxRetryRequests(0); + BlobRequestConditions conditions = null; + Duration timeout = null; + Context context = Context.NONE; try { - DownloadResponse response = blob.download().blockingGet(); - ByteBuffer buffer = - FlowableUtil.collectBytesInBuffer(response.body(null)).blockingGet(); - byte[] result = new byte[buffer.remaining()]; - buffer.get(result); - return result; - } catch (RestException e) { - if (e.response().statusCode() == 404) { + return blobClient.downloadContentWithResponse(options, conditions, timeout, context); + } catch (BlobStorageException e) { + if (e.getStatusCode() == HttpStatus.NOT_FOUND.value()) { return null; } - throw new StorageException("Failed to retreive bytes for " + key, e); + throw e; } } - public Properties getProperties(String key) throws StorageException { + /** @throws BlobStorageException If an I/O error occurs. */ + @Nullable + public byte[] getBytes(String key) { + BlobDownloadContentResponse download = download(key); + return download == null ? 
null : download.getValue().toBytes(); + } + + public Properties getProperties(String key) { Properties properties = new Properties(); byte[] bytes = getBytes(key); if (bytes != null) { @@ -170,7 +191,7 @@ public Properties getProperties(String key) throws StorageException { new InputStreamReader( new ByteArrayInputStream(bytes), StandardCharsets.UTF_8)); } catch (IOException e) { - throw new RuntimeException(e); + throw new UncheckedIOException(e); } } return properties; @@ -178,64 +199,96 @@ public Properties getProperties(String key) throws StorageException { public void putProperties(String resourceKey, Properties properties) throws StorageException { - ByteArrayOutputStream out = new ByteArrayOutputStream(); + String contentType = "text/plain"; + BinaryData data = toBinaryData(properties); + try { - properties.store(out, ""); - } catch (IOException e) { - throw new RuntimeException(e); + upload(resourceKey, data, contentType); + } catch (StorageException e) { + throw new StorageException( + "Failed to update e property file at " + resourceKey, e.getCause()); } + } + + public void upload(String resourceKey, BinaryData data, String contentType) + throws StorageException { + BlockBlobSimpleUploadOptions upload = new BlockBlobSimpleUploadOptions(data); + upload.setHeaders(new BlobHttpHeaders().setContentType(contentType)); + Duration timeout = null; + Context context = Context.NONE; + + Response response; try { - BlockBlobURL blob = getBlockBlobURL(resourceKey); - byte[] bytes = out.toByteArray(); - ByteBuffer buffer = ByteBuffer.wrap(bytes); - BlobHTTPHeaders headers = new BlobHTTPHeaders(); - headers.withBlobContentType("text/plain"); - - int status = - blob.upload(Flowable.just(buffer), bytes.length, headers, null, null, null) - .blockingGet() - .statusCode(); - if (!HttpStatus.valueOf(status).is2xxSuccessful()) { - throw new StorageException( - "Upload request failed with status " - + status - + " on resource " - + resourceKey); - } - } catch (RestException e) { - throw new StorageException("Failed to update e property file at " + resourceKey, e); + BlockBlobClient blob = getBlockBlobClient(resourceKey); + response = blob.uploadWithResponse(upload, timeout, context); + } catch (UncheckedIOException e) { + throw new StorageException("Failed to upload blob " + resourceKey, e.getCause()); + } + int status = response.getStatusCode(); + if (!HttpStatus.valueOf(status).is2xxSuccessful()) { + throw new StorageException( + "Upload request failed with status " + status + " on resource " + resourceKey); } } - public List listBlobs(String prefix, Integer maxResults) { - ContainerListBlobFlatSegmentResponse response = - container - .listBlobsFlatSegment( - null, - new ListBlobsOptions() - .withPrefix(prefix) - .withMaxResults(maxResults)) - .blockingGet(); - - BlobFlatListSegment segment = response.body().segment(); - List items = new ArrayList<>(); - if (segment != null) { - items.addAll(segment.blobItems()); + private BinaryData toBinaryData(Properties properties) { + ByteArrayOutputStream out = new ByteArrayOutputStream(); + try { + properties.store(out, ""); + } catch (IOException e) { + throw new UncheckedIOException(e); } - return items; + BinaryData data = BinaryData.fromBytes(out.toByteArray()); + return data; } - @Override - public void close() { - factory.close(); + public boolean blobExists(String key) { + return getBlockBlobClient(key).exists(); + } + + public boolean prefixExists(String prefix) { + return listBlobs(prefix, 1).findFirst().isPresent(); + } + + /** + * @param prefix + * 
@return an internally paged Stream, with the default items-per-page setting of {@link + * ListBlobsOptions#getMaxResultsPerPage() 5000} + */ + public Stream listBlobs(String prefix) { + return listBlobs(prefix, 5_000); + } + + /** + * @param prefix + * @param maxResultsPerPage used to control how many items Azure will return per page. It's not + * the max results to return entirely, so use it with caution. + * @return an internally paged (as per {@code maxResultsPerPage}) Stream. + */ + public Stream listBlobs(String prefix, int maxResultsPerPage) { + ListBlobsOptions opts = new ListBlobsOptions().setPrefix(prefix); + + // if > 5000, Azure will return 5000 items per page + opts.setMaxResultsPerPage(maxResultsPerPage); + + // An optional timeout value beyond which a {@link RuntimeException} will be + // raised. revisit: set a timeout? + Duration timeout = null; + PagedIterable blobs = container.listBlobs(opts, timeout); + return blobs.stream(); } public String getContainerName() { return configuration.getContainer(); } - public ContainerURL getContainer() { + public BlobContainerClient getContainer() { return container; } + + public boolean deleteBlob(String key) { + BlockBlobClient metadata = getBlockBlobClient(key); + return metadata.deleteIfExists(); + } } diff --git a/geowebcache/azureblob/src/main/java/org/geowebcache/azure/DeleteManager.java b/geowebcache/azureblob/src/main/java/org/geowebcache/azure/DeleteManager.java index efce89a94..0c5e4204d 100644 --- a/geowebcache/azureblob/src/main/java/org/geowebcache/azure/DeleteManager.java +++ b/geowebcache/azureblob/src/main/java/org/geowebcache/azure/DeleteManager.java @@ -15,17 +15,16 @@ package org.geowebcache.azure; import static org.geowebcache.azure.AzureBlobStore.log; -import static org.springframework.http.HttpStatus.NOT_FOUND; -import com.google.common.base.Strings; +import com.azure.core.http.rest.PagedResponse; +import com.azure.storage.blob.BlobContainerClient; +import com.azure.storage.blob.models.BlobItem; +import com.azure.storage.blob.models.ListBlobsOptions; import com.google.common.util.concurrent.ThreadFactoryBuilder; -import com.microsoft.azure.storage.blob.ContainerURL; -import com.microsoft.azure.storage.blob.ListBlobsOptions; -import com.microsoft.azure.storage.blob.models.BlobFlatListSegment; -import com.microsoft.azure.storage.blob.models.BlobItem; -import com.microsoft.azure.storage.blob.models.ContainerListBlobFlatSegmentResponse; -import com.microsoft.rest.v2.RestException; import java.io.Closeable; +import java.io.UncheckedIOException; +import java.time.Instant; +import java.time.OffsetDateTime; import java.util.ArrayList; import java.util.HashSet; import java.util.List; @@ -47,7 +46,6 @@ import org.geowebcache.locks.LockProvider.Lock; import org.geowebcache.storage.StorageException; import org.geowebcache.util.TMSKeyBuilder; -import org.springframework.http.HttpStatus; /** * Class handling deletes, which are normally handled in an asynchronous way in all other stores as @@ -98,22 +96,21 @@ private static ExecutorService createDeleteExecutorService( return Executors.newFixedThreadPool(parallelism, tf); } - // Azure, like S3, truncates timestamps to seconds precision and does not allow to - // programmatically set the last modified time + // Azure, like S3, truncates timestamps to seconds precision and does not allow + // to programmatically set the last modified time private long currentTimeSeconds() { - final long timestamp = (long) Math.ceil(System.currentTimeMillis() / 1000D) * 1000L; - return timestamp; + 
return Instant.now().getEpochSecond(); } /** * Executes the provided iterator of callables on the delete executor, returning their results */ public void executeParallel(List> callables) throws StorageException { - List futures = new ArrayList<>(); + List> futures = new ArrayList<>(); for (Callable callable : callables) { futures.add(deleteExecutor.submit(callable)); } - for (Future future : futures) { + for (Future future : futures) { try { future.get(); } catch (Exception e) { @@ -176,10 +173,11 @@ public void issuePendingBulkDeletes() throws StorageException { for (Map.Entry e : deletes.entrySet()) { final String prefix = e.getKey().toString(); final long timestamp = Long.parseLong(e.getValue().toString()); - log.info( - String.format( - "Restarting pending bulk delete on '%s/%s':%d", - client.getContainerName(), prefix, timestamp)); + if (log.isLoggable(Level.INFO)) + log.info( + String.format( + "Restarting pending bulk delete on '%s/%s':%d", + client.getContainerName(), prefix, timestamp)); if (!asyncDelete(prefix, timestamp)) { deletesToClear.add(prefix); } @@ -199,11 +197,12 @@ public void issuePendingBulkDeletes() throws StorageException { public synchronized boolean asyncDelete(String prefix, long timestamp) { // do we have anything to delete? - if (client.listBlobs(prefix, 1).isEmpty()) { + if (!client.prefixExists(prefix)) { return false; } - // is there any task already deleting a larger set of times in the same prefix folder? + // is there any task already deleting a larger set of times in the same prefix + // folder? Long currentTaskTime = pendingDeletesKeyTime.get(prefix); if (currentTaskTime != null && currentTaskTime.longValue() > timestamp) { return false; @@ -216,38 +215,6 @@ public synchronized boolean asyncDelete(String prefix, long timestamp) { return true; } - private void clearPendingBulkDelete(final String prefix, final long timestamp) - throws GeoWebCacheException { - Long taskTime = pendingDeletesKeyTime.get(prefix); - if (taskTime == null) { - return; // someone else cleared it up for us. A task that run after this one but - // finished before? - } - if (taskTime.longValue() > timestamp) { - return; // someone else issued a bulk delete after this one for the same key prefix - } - final String pendingDeletesKey = keyBuilder.pendingDeletes(); - final Lock lock = locks.getLock(pendingDeletesKey); - - try { - Properties deletes = client.getProperties(pendingDeletesKey); - String storedVal = (String) deletes.remove(prefix); - long storedTimestamp = storedVal == null ? 
Long.MIN_VALUE : Long.parseLong(storedVal); - if (timestamp >= storedTimestamp) { - client.putProperties(pendingDeletesKey, deletes); - } else { - log.info( - String.format( - "bulk delete finished but there's a newer one ongoing for container '%s/%s'", - client.getContainerName(), prefix)); - } - } catch (StorageException e) { - throw new RuntimeException(e); - } finally { - lock.release(); - } - } - @Override public void close() { deleteExecutor.shutdownNow(); @@ -267,34 +234,26 @@ public Long call() throws Exception { long count = 0L; try { checkInterrupted(); - log.info( - String.format( - "Running bulk delete on '%s/%s':%d", - client.getContainerName(), prefix, timestamp)); + if (log.isLoggable(Level.INFO)) + log.info( + String.format( + "Running bulk delete on '%s/%s':%d", + client.getContainerName(), prefix, timestamp)); - ContainerURL container = client.getContainer(); + BlobContainerClient container = client.getContainer(); int jobPageSize = Math.max(concurrency, PAGE_SIZE); ListBlobsOptions options = - new ListBlobsOptions().withPrefix(prefix).withMaxResults(jobPageSize); - ContainerListBlobFlatSegmentResponse response = - container.listBlobsFlatSegment(null, options, null).blockingGet(); - - Predicate filter = - blobItem -> { - long lastModified = - blobItem.properties().lastModified().toEpochSecond() * 1000; - return timestamp >= lastModified; - }; - - while (response.body().segment() != null) { - checkInterrupted(); - deleteItems(container, response.body().segment(), filter); - String marker = response.body().nextMarker(); - // marker will be empty if there is no next page - if (Strings.isNullOrEmpty(marker)) break; - // fetch next page - response = container.listBlobsFlatSegment(marker, options, null).blockingGet(); + new ListBlobsOptions().setPrefix(prefix).setMaxResultsPerPage(jobPageSize); + Iterable> response = + container.listBlobs(options, null).iterableByPage(); + + for (PagedResponse segment : response) { + try (PagedResponse s = segment) { // try-with-resources to please PMD + checkInterrupted(); + List items = s.getValue(); + count += deleteItems(container, items, this::equalOrAfter); + } } } catch (InterruptedException | IllegalStateException e) { log.info( @@ -302,7 +261,7 @@ public Long call() throws Exception { "Azure bulk delete aborted for '%s/%s'. Will resume on next startup.", client.getContainerName(), prefix)); throw e; - } catch (Exception e) { + } catch (RuntimeException e) { log.log( Level.WARNING, String.format( @@ -312,26 +271,67 @@ public Long call() throws Exception { throw e; } - log.info( - String.format( - "Finished bulk delete on '%s/%s':%d. %d objects deleted", - client.getContainerName(), prefix, timestamp, count)); + if (log.isLoggable(Level.INFO)) + log.info( + String.format( + "Finished bulk delete on '%s/%s':%d. %d objects deleted", + client.getContainerName(), prefix, timestamp, count)); clearPendingBulkDelete(prefix, timestamp); return count; } + private boolean equalOrAfter(BlobItem blobItem) { + OffsetDateTime lastModified = blobItem.getProperties().getLastModified(); + long lastModifiedSecs = lastModified.toEpochSecond(); + return timestamp >= lastModifiedSecs; + } + + private void clearPendingBulkDelete(final String prefix, final long timestamp) + throws GeoWebCacheException { + Long taskTime = pendingDeletesKeyTime.get(prefix); + if (taskTime == null) { + return; // someone else cleared it up for us. A task that run after this one but + // finished before? 
+ } + if (taskTime.longValue() > timestamp) { + return; // someone else issued a bulk delete after this one for the same key prefix + } + final String pendingDeletesKey = keyBuilder.pendingDeletes(); + final Lock lock = locks.getLock(pendingDeletesKey); + + try { + Properties deletes = client.getProperties(pendingDeletesKey); + String storedVal = (String) deletes.remove(prefix); + long storedTimestamp = + storedVal == null ? Long.MIN_VALUE : Long.parseLong(storedVal); + if (timestamp >= storedTimestamp) { + client.putProperties(pendingDeletesKey, deletes); + } else if (log.isLoggable(Level.INFO)) { + log.info( + String.format( + "bulk delete finished but there's a newer one ongoing for container '%s/%s'", + client.getContainerName(), prefix)); + } + } catch (StorageException e) { + throw new UncheckedIOException(e); + } finally { + lock.release(); + } + } + private long deleteItems( - ContainerURL container, BlobFlatListSegment segment, Predicate filter) + BlobContainerClient container, List segment, Predicate filter) throws ExecutionException, InterruptedException { + List> collect = - segment.blobItems().stream() - .filter(item -> filter.test(item)) + segment.stream() + .filter(filter) .map( item -> deleteExecutor.submit( () -> { - deleteItem(container, item); + deleteItem(container, item.getName()); return null; })) .collect(Collectors.toList()); @@ -341,27 +341,6 @@ private long deleteItems( } return collect.size(); } - - private void deleteItem(ContainerURL container, BlobItem item) { - String key = item.name(); - try { - - int status = container.createBlobURL(key).delete().blockingGet().statusCode(); - if (status != NOT_FOUND.value() && !HttpStatus.valueOf(status).is2xxSuccessful()) { - throw new RuntimeException( - "Deletion failed with status " + status + " on resource " + key); - } - } catch (RestException e) { - if (e.response().statusCode() != NOT_FOUND.value()) { - throw new RuntimeException( - "Deletion failed with status " - + e.response().statusCode() - + " on resource " - + key, - e); - } - } - } } public class KeysBulkDelete implements Callable { @@ -385,7 +364,7 @@ public Long call() throws Exception { keys.subList(0, Math.min(keys.size(), 100)))); } - ContainerURL container = client.getContainer(); + BlobContainerClient container = client.getContainer(); for (int i = 0; i < keys.size(); i += PAGE_SIZE) { deleteItems(container, keys.subList(i, Math.min(i + PAGE_SIZE, keys.size()))); @@ -398,50 +377,30 @@ public Long call() throws Exception { throw e; } - log.info( - String.format( - "Finished bulk delete on %s, %d objects deleted", - client.getContainerName(), count)); + if (log.isLoggable(Level.INFO)) + log.info( + String.format( + "Finished bulk delete on %s, %d objects deleted", + client.getContainerName(), count)); return count; } - private long deleteItems(ContainerURL container, List itemNames) + private long deleteItems(BlobContainerClient container, List itemNames) throws ExecutionException, InterruptedException { - List> collect = + List> collect = itemNames.stream() - .map( - item -> - deleteExecutor.submit( - () -> { - return deleteItem(container, item); - })) + .map(item -> deleteExecutor.submit(() -> deleteItem(container, item))) .collect(Collectors.toList()); - for (Future f : collect) { + for (Future f : collect) { f.get(); } return collect.size(); } + } - private Object deleteItem(ContainerURL container, String item) { - try { - int status = container.createBlobURL(item).delete().blockingGet().statusCode(); - if (status != NOT_FOUND.value() && 
!HttpStatus.valueOf(status).is2xxSuccessful()) { - throw new RuntimeException( - "Deletion failed with status " + status + " on resource " + item); - } - } catch (RestException e) { - if (e.response().statusCode() != NOT_FOUND.value()) { - throw new RuntimeException( - "Deletion failed with status " - + e.response().statusCode() - + " on resource " - + item, - e); - } - } - return null; - } + private boolean deleteItem(BlobContainerClient container, String key) { + return container.getBlobClient(key).deleteIfExists(); } void checkInterrupted() throws InterruptedException { diff --git a/geowebcache/azureblob/src/test/java/org/geowebcache/azure/AzureBlobStoreIntegrationTest.java b/geowebcache/azureblob/src/test/java/org/geowebcache/azure/AzureBlobStoreIntegrationTest.java index 2c4ba5f34..5906f9e7b 100644 --- a/geowebcache/azureblob/src/test/java/org/geowebcache/azure/AzureBlobStoreIntegrationTest.java +++ b/geowebcache/azureblob/src/test/java/org/geowebcache/azure/AzureBlobStoreIntegrationTest.java @@ -96,7 +96,7 @@ public void before() throws Exception { TileLayerDispatcher layers = mock(TileLayerDispatcher.class); LockProvider lockProvider = new NoOpLockProvider(); TileLayer layer = mock(TileLayer.class); - when(layers.getTileLayer(eq(DEFAULT_LAYER))).thenReturn(layer); + when(layers.getTileLayer(DEFAULT_LAYER)).thenReturn(layer); when(layer.getName()).thenReturn(DEFAULT_LAYER); when(layer.getId()).thenReturn(DEFAULT_LAYER); blobStore = new AzureBlobStore(config, layers, lockProvider); diff --git a/geowebcache/azureblob/src/test/java/org/geowebcache/azure/AzureBlobStoreSuitabilityTest.java b/geowebcache/azureblob/src/test/java/org/geowebcache/azure/AzureBlobStoreSuitabilityTest.java index 22e6688b9..ea56ca3d9 100644 --- a/geowebcache/azureblob/src/test/java/org/geowebcache/azure/AzureBlobStoreSuitabilityTest.java +++ b/geowebcache/azureblob/src/test/java/org/geowebcache/azure/AzureBlobStoreSuitabilityTest.java @@ -18,8 +18,11 @@ import static org.hamcrest.Matchers.hasItemInArray; import static org.junit.Assert.assertTrue; -import io.reactivex.Flowable; -import java.nio.ByteBuffer; +import com.azure.core.http.rest.Response; +import com.azure.core.util.BinaryData; +import com.azure.core.util.Context; +import com.azure.storage.blob.models.BlockBlobItem; +import com.azure.storage.blob.options.BlockBlobSimpleUploadOptions; import org.easymock.EasyMock; import org.geowebcache.azure.tests.container.AzuriteAzureBlobStoreSuitabilityIT; import org.geowebcache.azure.tests.online.OnlineAzureBlobStoreSuitabilityIT; @@ -63,13 +66,13 @@ public void setup() throws Exception { protected abstract AzureClient getClient(); - @SuppressWarnings("unchecked") + @SuppressWarnings({"unchecked", "rawtypes"}) @Override protected Matcher existing() { return (Matcher) hasItemInArray(equalTo("metadata.properties")); } - @SuppressWarnings("unchecked") + @SuppressWarnings({"unchecked", "rawtypes"}) @Override protected Matcher empty() { return (Matcher) Matchers.emptyArray(); @@ -80,14 +83,13 @@ public BlobStore create(Object dir) throws Exception { AzureBlobStoreData info = getConfiguration(); for (String path : (String[]) dir) { String fullPath = info.getPrefix() + "/" + path; - ByteBuffer byteBuffer = ByteBuffer.wrap("testAbc".getBytes()); - int statusCode = + BinaryData data = BinaryData.fromString("testAbc"); + Response response = getClient() - .getBlockBlobURL(fullPath) - .upload(Flowable.just(byteBuffer), byteBuffer.limit()) - .blockingGet() - .statusCode(); - 
assertTrue(HttpStatus.valueOf(statusCode).is2xxSuccessful()); + .getBlockBlobClient(fullPath) + .uploadWithResponse( + new BlockBlobSimpleUploadOptions(data), null, Context.NONE); + assertTrue(HttpStatus.valueOf(response.getStatusCode()).is2xxSuccessful()); } return new AzureBlobStore(info, tld, locks); } diff --git a/geowebcache/azureblob/src/test/java/org/geowebcache/azure/tests/container/AzuriteAzureBlobStoreConformanceIT.java b/geowebcache/azureblob/src/test/java/org/geowebcache/azure/tests/container/AzuriteAzureBlobStoreConformanceIT.java index 58c0abfab..aae6b52cd 100644 --- a/geowebcache/azureblob/src/test/java/org/geowebcache/azure/tests/container/AzuriteAzureBlobStoreConformanceIT.java +++ b/geowebcache/azureblob/src/test/java/org/geowebcache/azure/tests/container/AzuriteAzureBlobStoreConformanceIT.java @@ -30,14 +30,8 @@ */ public class AzuriteAzureBlobStoreConformanceIT extends AzureBlobStoreConformanceTest { - /** - * Use "legacy" container to work with {@literal - * com.microsoft.azure:azure-storage-blob:jar:11.0.0}. Instantiate it as - * AzuriteContainer.legacy().debugLegacy() to print out request/response information for - * debugging purposes - */ @ClassRule - public static AzuriteContainer azurite = AzuriteContainer.legacy().disabledWithoutDocker(); + public static AzuriteContainer azurite = AzuriteContainer.latest().disabledWithoutDocker(); /** Used to get a per-test case Azure container */ @Rule public TestName testName = new TestName(); diff --git a/geowebcache/azureblob/src/test/java/org/geowebcache/azure/tests/container/AzuriteAzureBlobStoreIntegrationIT.java b/geowebcache/azureblob/src/test/java/org/geowebcache/azure/tests/container/AzuriteAzureBlobStoreIntegrationIT.java index 9f4fba858..ac5ca2763 100644 --- a/geowebcache/azureblob/src/test/java/org/geowebcache/azure/tests/container/AzuriteAzureBlobStoreIntegrationIT.java +++ b/geowebcache/azureblob/src/test/java/org/geowebcache/azure/tests/container/AzuriteAzureBlobStoreIntegrationIT.java @@ -30,14 +30,8 @@ */ public class AzuriteAzureBlobStoreIntegrationIT extends AzureBlobStoreIntegrationTest { - /** - * Use "legacy" container to work with {@literal - * com.microsoft.azure:azure-storage-blob:jar:11.0.0}. Instantiate it as - * AzuriteContainer.legacy().debugLegacy() to print out request/response information for - * debugging purposes - */ @ClassRule - public static AzuriteContainer azurite = AzuriteContainer.legacy().disabledWithoutDocker(); + public static AzuriteContainer azurite = AzuriteContainer.latest().disabledWithoutDocker(); /** Used to get a per-test case Azure container */ @Rule public TestName testName = new TestName(); diff --git a/geowebcache/azureblob/src/test/java/org/geowebcache/azure/tests/container/AzuriteAzureBlobStoreSuitabilityIT.java b/geowebcache/azureblob/src/test/java/org/geowebcache/azure/tests/container/AzuriteAzureBlobStoreSuitabilityIT.java index 3cdc8c164..ed7298e8d 100644 --- a/geowebcache/azureblob/src/test/java/org/geowebcache/azure/tests/container/AzuriteAzureBlobStoreSuitabilityIT.java +++ b/geowebcache/azureblob/src/test/java/org/geowebcache/azure/tests/container/AzuriteAzureBlobStoreSuitabilityIT.java @@ -34,14 +34,8 @@ */ public class AzuriteAzureBlobStoreSuitabilityIT extends AzureBlobStoreSuitabilityTest { - /** - * Use "legacy" container to work with {@literal - * com.microsoft.azure:azure-storage-blob:jar:11.0.0}. 
Instantiate it as - * AzuriteContainer.legacy().debugLegacy() to print out request/response information for - * debugging purposes - */ @ClassRule - public static AzuriteContainer azurite = AzuriteContainer.legacy().disabledWithoutDocker(); + public static AzuriteContainer azurite = AzuriteContainer.latest().disabledWithoutDocker(); /** Used to get a per-test case Azure container */ @Rule public TestName testName = new TestName(); diff --git a/geowebcache/azureblob/src/test/java/org/geowebcache/azure/tests/online/OnlineAzureBlobStoreIntegrationIT.java b/geowebcache/azureblob/src/test/java/org/geowebcache/azure/tests/online/OnlineAzureBlobStoreIntegrationIT.java index 25a07b89d..957102153 100644 --- a/geowebcache/azureblob/src/test/java/org/geowebcache/azure/tests/online/OnlineAzureBlobStoreIntegrationIT.java +++ b/geowebcache/azureblob/src/test/java/org/geowebcache/azure/tests/online/OnlineAzureBlobStoreIntegrationIT.java @@ -21,7 +21,6 @@ import org.junit.Assume; import org.junit.Rule; import org.junit.Test; -import org.springframework.http.HttpStatus; public class OnlineAzureBlobStoreIntegrationIT extends AzureBlobStoreIntegrationTest { @@ -41,14 +40,8 @@ protected AzureBlobStoreData getConfiguration() { public void testCreatesStoreMetadataOnStart() { String prefix = tempFolder.getConfig().getPrefix(); // if the file does not exist a StorageException will be thrown - int status = - tempFolder - .getClient() - .getBlockBlobURL(prefix + "/metadata.properties") - .getProperties() - .blockingGet() - .statusCode(); - assertTrue( - "Expected success but got " + status, HttpStatus.valueOf(status).is2xxSuccessful()); + String key = prefix + "/metadata.properties"; + boolean exists = tempFolder.getClient().getBlockBlobClient(key).exists(); + assertTrue("blob " + key + " does not exist", exists); } } diff --git a/geowebcache/azureblob/src/test/java/org/geowebcache/azure/tests/online/TemporaryAzureFolder.java b/geowebcache/azureblob/src/test/java/org/geowebcache/azure/tests/online/TemporaryAzureFolder.java index 89774b3db..c6d8222b7 100644 --- a/geowebcache/azureblob/src/test/java/org/geowebcache/azure/tests/online/TemporaryAzureFolder.java +++ b/geowebcache/azureblob/src/test/java/org/geowebcache/azure/tests/online/TemporaryAzureFolder.java @@ -17,15 +17,14 @@ import static com.google.common.base.Preconditions.checkState; import static org.junit.Assert.assertTrue; -import com.microsoft.azure.storage.blob.BlockBlobURL; -import com.microsoft.azure.storage.blob.models.BlobItem; -import java.util.List; +import com.azure.storage.blob.models.BlobItem; +import com.azure.storage.blob.specialized.BlockBlobClient; import java.util.Properties; import java.util.UUID; +import java.util.stream.Stream; import org.geowebcache.azure.AzureBlobStoreData; import org.geowebcache.azure.AzureClient; import org.junit.rules.ExternalResource; -import org.springframework.http.HttpStatus; /** * The TemporaryAzureFolder provides a path prefix for Azure storage and deletes all resources under @@ -71,7 +70,6 @@ protected void after() { delete(); } finally { temporaryPrefix = null; - client.close(); } } @@ -117,13 +115,14 @@ public void delete() { return; } - List blobs = client.listBlobs(temporaryPrefix, Integer.MAX_VALUE); - for (BlobItem blob : blobs) { - BlockBlobURL blockBlobURL = client.getBlockBlobURL(blob.name()); - int status = blockBlobURL.delete().blockingGet().statusCode(); - assertTrue( - "Expected success but got " + status + " while deleting " + blob.name(), - HttpStatus.valueOf(status).is2xxSuccessful()); + try 
(Stream blobs = client.listBlobs(temporaryPrefix)) { + blobs.forEach( + blob -> { + BlockBlobClient blockBlobURL = client.getBlockBlobClient(blob.getName()); + assertTrue( + "Expected success while deleting " + blob.getName(), + blockBlobURL.deleteIfExists()); + }); } } diff --git a/geowebcache/azureblob/src/test/java/org/geowebcache/testcontainers/azure/AzuriteContainer.java b/geowebcache/azureblob/src/test/java/org/geowebcache/testcontainers/azure/AzuriteContainer.java index fb8d06b60..857122c6f 100644 --- a/geowebcache/azureblob/src/test/java/org/geowebcache/testcontainers/azure/AzuriteContainer.java +++ b/geowebcache/azureblob/src/test/java/org/geowebcache/testcontainers/azure/AzuriteContainer.java @@ -18,9 +18,7 @@ import static org.junit.Assert.assertTrue; import static org.junit.Assume.assumeTrue; -import java.io.IOException; import org.apache.commons.lang3.StringUtils; -import org.apache.commons.lang3.exception.UncheckedException; import org.geowebcache.azure.AzureBlobStoreData; import org.geowebcache.azure.tests.container.AzuriteAzureBlobStoreConformanceIT; import org.junit.Assume; @@ -33,14 +31,15 @@ /** * Testcontainers container for AWS Azurite - * blobstore test environment. + * "https://learn.microsoft.com/en-us/azure/storage/common/storage-use-azurite">Azurite Azure + * Storage Emulator. * *

 * <p>Runs the Azurite
 * Docker image for local Azure Storage development with testcontainers.
 *
- * <p>Azurite accepts the same well-known account and key used by the legacy Azure Storage Emulator:
+ * <p>Azurite sets up the following well-known account and key for the Azure Storage Emulator,
+ * available through {@link #getAccountName()} and {@link #getAccountKey()}:
 *
 * <ul>
 *   <li>Account name: {@code devstoreaccount1}
 * </ul>
 *
 * <pre>
 * <code>
- *   @Rule public AzuriteContainer azurite = AzuriteContainer.legacy();
- * </code>
- * </pre>
- *
- * works with the old {@code com.microsoft.azure:azure-storage-blob:jar:11.0.0} as a dependency.
- *
- * <pre>
- * <code>
- *   @Rule public AzuriteContainer azurite = AzuriteContainer.legacy();
+ *   @Rule public AzuriteContainer azurite = AzuriteContainer.latest();
 * </code>
 * </pre>
 *
 * <pre>
- *   @ClassRule public static AzuriteContainer azurite = AzuriteContainer.legacy();
+ *   @ClassRule public static AzuriteContainer azurite = AzuriteContainer.latest();
 *
 *   @Test
 *   public void azureBlobStoreSmokeTest(){
@@ -87,78 +78,24 @@ public class AzuriteContainer extends GenericContainer<AzuriteContainer> {
         private static final DockerImageName LATEST_IMAGE =
                 DockerImageName.parse("mcr.microsoft.com/azure-storage/azurite:latest");
     
    -    private static final DockerImageName LEGACY_IMAGE =
    -            DockerImageName.parse("arafato/azurite:2.6.5");
    -
         private final String accountName = "devstoreaccount1";
         private final String accountKey =
                 "Eby8vdM02xNOcqFlqUwJPLlmEtlCDXJ1OUzFT50uSRZ6IFsuFq2UVErCz4I6tq/K1SZFPTOtr/KBHBeksoGMGw==";
     
         private final int blobsPort = 10_000;
     
    -    private AzuriteContainerLegacyProxy proxy;
    -
    -    private final boolean doProxy;
    -
    -    /** Whether to print request/response debug information when in {@link #legacy} mode */
    -    private boolean debugRequests;
    -
         /** flag for {@link #disabledWithoutDocker()} */
         private boolean disabledWithoutDocker;
     
    -    private AzuriteContainer(DockerImageName imageName, boolean doProxy) {
    +    private AzuriteContainer(DockerImageName imageName) {
             super(imageName);
    -        this.doProxy = doProxy;
             super.setWaitStrategy(Wait.forListeningPort());
             super.addExposedPort(blobsPort);
         }
     
    -    /**
    -     * @return a container running {@code arafato/azurite:2.6.5} and {@link #getBlobsPort() proxied}
    -     *     to fix protocol discrepancies so it works correctly with older {@code
    -     *     com.microsoft.azure:azure-storage-blob} dependencies
    -     */
    -    public static AzuriteContainer legacy() {
    -        return new AzuriteContainer(LEGACY_IMAGE, true);
    -    }
    -
         /** @return a container running {@code mcr.microsoft.com/azure-storage/azurite:latest} */
         public static AzuriteContainer latest() {
    -        return new AzuriteContainer(LATEST_IMAGE, false);
    -    }
    -
    -    /**
    -     * Enables request/response debugging when in legacy mode
    -     *
-     * Sample output:
-     *
-     *
    -     * routing GET http://localhost:44445/devstoreaccount1/testputgetblobisnotbytearrayresource/topp%3Aworld%2FEPSG%3A4326%2Fpng%2Fdefault%2F12%2F20%2F30.png to GET http://localhost:33319/devstoreaccount1/testputgetblobisnotbytearrayresource/topp%3Aworld%2FEPSG%3A4326%2Fpng%2Fdefault%2F12%2F20%2F30.png
    -     * 	applied request header Authorization: SharedKey devstoreaccount1:6UeSk1Qf8XRibLI1sE3tasmDxOtVxGUSMDQqRUDIW9Y=
    -     * 	applied request header x-ms-version: 2018-11-09
    -     * 	applied request header x-ms-date: Fri, 09 Aug 2024 17:08:38 GMT
    -     * 	applied request header host: localhost
    -     * 	applied request header x-ms-client-request-id: 526b726a-13af-49a3-b277-fdf645d77903
    -     * 	applied request header User-Agent: Azure-Storage/11.0.0 (JavaJRE 11.0.23; Linux 6.8.0-39-generic)
    -     * 	response: 200 OK
    -     * 	applied response header X-Powered-By: Express
    -     * 	applied response header ETag: "jzUOHaHcch36ue3TFspQaLiWSvo"
    -     * 	applied response header Last-Modified: Fri, 09 Aug 2024 17:08:38 GMT
    -     * 	applied response header x-ms-version: 2016-05-31
    -     * 	applied response header date: Fri, 09 Aug 2024 17:08:38 GMT
    -     * 	applied response header x-ms-request-id: 05130dd1-5672-11ef-a96b-c7f08f042b95
    -     * 	applied response header accept-ranges: bytes
    -     * 	applied response header x-ms-blob-type: BlockBlob
    -     * 	applied response header x-ms-request-server-encrypted: false
    -     * 	applied response header Content-Type: image/png
    -     * 	Content-Type: image/png
    -     * 
    -     * 
-     */
-    public AzuriteContainer debugLegacy() {
-        this.debugRequests = true;
-        return this;
+        return new AzuriteContainer(LATEST_IMAGE);
     }
 
     /**
@@ -189,32 +126,6 @@ public Statement apply(Statement base, Description description) {
         return super.apply(base, description);
     }
 
-    @Override
-    public void start() {
-        super.start();
-        if (doProxy && proxy == null) {
-            int targetPort = getRealBlobsPort();
-            proxy = new AzuriteContainerLegacyProxy(targetPort).debugRequests(debugRequests);
-            try {
-                proxy.start();
-            } catch (IOException e) {
-                throw new UncheckedException(e);
-            }
-        }
-    }
-
-    @Override
-    public void stop() {
-        super.stop();
-        if (doProxy && null != proxy) {
-            try {
-                proxy.stop();
-            } finally {
-                proxy = null;
-            }
-        }
-    }
-
     public String getAccountName() {
         return accountName;
     }
@@ -234,14 +145,6 @@ public String getAccountKey() {
      * carries over.
      */
     public int getBlobsPort() {
-        if (doProxy) {
-            if (proxy == null) throw new IllegalStateException("");
-            return proxy.getLocalPort();
-        }
-        return getRealBlobsPort();
-    }
-
-    int getRealBlobsPort() {
         return super.getMappedPort(blobsPort);
     }
diff --git a/geowebcache/azureblob/src/test/java/org/geowebcache/testcontainers/azure/AzuriteContainerLegacyProxy.java b/geowebcache/azureblob/src/test/java/org/geowebcache/testcontainers/azure/AzuriteContainerLegacyProxy.java
deleted file mode 100644
index 6874f31a1..000000000
--- a/geowebcache/azureblob/src/test/java/org/geowebcache/testcontainers/azure/AzuriteContainerLegacyProxy.java
+++ /dev/null
@@ -1,331 +0,0 @@
-/**
- * This program is free software: you can redistribute it and/or modify it under the terms of the
- * GNU Lesser General Public License as published by the Free Software Foundation, either version 3
- * of the License, or (at your option) any later version.
- *
- * This program is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY;
- * without even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU Lesser General Public License along with this
- * program. If not, see <http://www.gnu.org/licenses/>.
- *
- * Copyright 2024
- */
-package org.geowebcache.testcontainers.azure;
-
-import java.io.IOException;
-import java.io.UncheckedIOException;
-import java.net.ServerSocket;
-import java.util.Optional;
-import java.util.concurrent.atomic.AtomicBoolean;
-import java.util.function.Function;
-import java.util.logging.Logger;
-import java.util.stream.Stream;
-import org.apache.http.Header;
-import org.apache.http.HttpEntity;
-import org.apache.http.HttpException;
-import org.apache.http.HttpRequest;
-import org.apache.http.HttpResponse;
-import org.apache.http.RequestLine;
-import org.apache.http.StatusLine;
-import org.apache.http.client.methods.CloseableHttpResponse;
-import org.apache.http.client.methods.HttpUriRequest;
-import org.apache.http.client.methods.RequestBuilder;
-import org.apache.http.entity.BasicHttpEntity;
-import org.apache.http.entity.ByteArrayEntity;
-import org.apache.http.entity.ContentType;
-import org.apache.http.impl.DefaultBHttpServerConnectionFactory;
-import org.apache.http.impl.bootstrap.HttpServer;
-import org.apache.http.impl.bootstrap.ServerBootstrap;
-import org.apache.http.impl.client.CloseableHttpClient;
-import org.apache.http.impl.client.DefaultConnectionKeepAliveStrategy;
-import org.apache.http.impl.client.HttpClients;
-import org.apache.http.impl.conn.PoolingHttpClientConnectionManager;
-import org.apache.http.protocol.HttpContext;
-import org.apache.http.protocol.HttpProcessor;
-import org.apache.http.protocol.HttpProcessorBuilder;
-import org.apache.http.protocol.HttpRequestHandler;
-import org.apache.http.protocol.ResponseConnControl;
-import org.apache.http.protocol.ResponseContent;
-import org.apache.http.util.EntityUtils;
-import org.geotools.util.logging.Logging;
-
-/**
- * A simple HTTP proxy to adapt some Azure Blob storage protocol issues to the netty version used by
- * older {@code com.microsoft.azure:azure-storage-blob} dependencies.
- *
- * For instance, re-writes the returned response headers {@code etag}, {@code last-modified}, and
- * {@code content-type}, as {@code Etag}, {@code Last-Modified}, and {@code Content-Type},
- * respectively, as expected by the Netty version the legacy {@code
- * com.microsoft.azure:azure-storage-blob} dependency transitively carries over.
- *
- * Even though HTTP request and response headers should be case-insensitive, this older netty
- * version ({@code 4.1.28}, and even newer ones) fail to parse the lower-case names returned by
- * Azurite.
- */
-class AzuriteContainerLegacyProxy {
-    public static Logger LOGGER = Logging.getLogger(AzuriteContainerLegacyProxy.class.getName());
-
-    private int localPort;
-    private HttpServer proxyServer;
-
-    private int targetPort;
-
-    private final AtomicBoolean started = new AtomicBoolean();
-
-    private boolean debug;
-
-    AzuriteContainerLegacyProxy(int targetPort) {
-        this.targetPort = targetPort;
-    }
-
-    /**
-     * @return the random port where the proxy server is running
-     * @throws IllegalStateException if the proxy is not {@link #start() running}
-     */
-    public int getLocalPort() {
-        if (!started.get()) {
-            throw new IllegalStateException(
-                    "Proxy not running, local port is allocated at start()");
-        }
-        return localPort;
-    }
-
-    /**
-     * Whether to print request/response debugging information to stderr.
-     *
-     * Sample output:
-     *
-     *
    -     * routing GET http://localhost:44445/devstoreaccount1/testputgetblobisnotbytearrayresource/topp%3Aworld%2FEPSG%3A4326%2Fpng%2Fdefault%2F12%2F20%2F30.png to GET http://localhost:33319/devstoreaccount1/testputgetblobisnotbytearrayresource/topp%3Aworld%2FEPSG%3A4326%2Fpng%2Fdefault%2F12%2F20%2F30.png
    -     * 	applied request header Authorization: SharedKey devstoreaccount1:6UeSk1Qf8XRibLI1sE3tasmDxOtVxGUSMDQqRUDIW9Y=
    -     * 	applied request header x-ms-version: 2018-11-09
    -     * 	applied request header x-ms-date: Fri, 09 Aug 2024 17:08:38 GMT
    -     * 	applied request header host: localhost
    -     * 	applied request header x-ms-client-request-id: 526b726a-13af-49a3-b277-fdf645d77903
    -     * 	applied request header User-Agent: Azure-Storage/11.0.0 (JavaJRE 11.0.23; Linux 6.8.0-39-generic)
    -     * 	response: 200 OK
    -     * 	applied response header X-Powered-By: Express
    -     * 	applied response header ETag: "jzUOHaHcch36ue3TFspQaLiWSvo"
    -     * 	applied response header Last-Modified: Fri, 09 Aug 2024 17:08:38 GMT
    -     * 	applied response header x-ms-version: 2016-05-31
    -     * 	applied response header date: Fri, 09 Aug 2024 17:08:38 GMT
    -     * 	applied response header x-ms-request-id: 05130dd1-5672-11ef-a96b-c7f08f042b95
    -     * 	applied response header accept-ranges: bytes
    -     * 	applied response header x-ms-blob-type: BlockBlob
    -     * 	applied response header x-ms-request-server-encrypted: false
    -     * 	applied response header Content-Type: image/png
    -     * 	Content-Type: image/png
    -     * 
    -     * 
-     */
-    public AzuriteContainerLegacyProxy debugRequests(boolean debug) {
-        this.debug = debug;
-        return this;
-    }
-
-    /** Allocates a free port and runs the proxy server on it. This method is idempotent. */
-    public void start() throws IOException {
-        if (started.compareAndSet(false, true)) {
-            this.localPort = findFreePort();
-
-            // this is the request handler that performs the proxying and fixes the response headers
-            HttpRequestHandler proxyHandler = new ProxyHandler(localPort, targetPort, debug);
-
-            HttpProcessor httpproc =
-                    HttpProcessorBuilder.create()
-                            // handles Transfer-Encoding and Content-Length
-                            .add(new ResponseContent(true))
-                            // handles connection keep-alive
-                            .add(new ResponseConnControl())
-                            .build();
-
-            proxyServer =
-                    ServerBootstrap.bootstrap()
-                            .setConnectionFactory(DefaultBHttpServerConnectionFactory.INSTANCE)
-                            .setHttpProcessor(httpproc)
-                            .setListenerPort(localPort)
-                            .registerHandler("*", proxyHandler)
-                            .create();
-            proxyServer.start();
-        }
-    }
-
-    /** Stops the proxy server. This method is idempotent. */
-    public void stop() {
-        if (started.compareAndSet(true, false)) {
-            proxyServer.stop();
-        }
-    }
-
-    private int findFreePort() {
-        try (ServerSocket s = new ServerSocket(0)) {
-            return s.getLocalPort();
-        } catch (IOException e) {
-            throw new UncheckedIOException(e);
-        }
-    }
-
-    private static class ProxyHandler implements HttpRequestHandler {
-        private final int sourcePort;
-        private final int targetPort;
-        private boolean debug;
-
-        final CloseableHttpClient client;
-        Function<String, String> responseHeaderNameTransform = Function.identity();
-
-        ProxyHandler(int sourcePort, int targetPort, boolean debug) {
-            this.sourcePort = sourcePort;
-            this.targetPort = targetPort;
-            this.debug = debug;
-
-            @SuppressWarnings("PMD.CloseResource")
-            PoolingHttpClientConnectionManager connManager =
-                    new PoolingHttpClientConnectionManager();
-            client =
-                    HttpClients.custom()
-                            .setConnectionManager(connManager)
-                            .setKeepAliveStrategy(new DefaultConnectionKeepAliveStrategy())
-                            .build();
-        }
-
-        @Override
-        public void handle(HttpRequest request, HttpResponse response, HttpContext context)
-                throws HttpException, IOException {
-            HttpUriRequest proxyRequest = proxify(request);
-            logRequest(request, proxyRequest);
-
-            try (CloseableHttpResponse proxyResponse = client.execute(proxyRequest)) {
-                response.setStatusLine(proxyResponse.getStatusLine()); // status and reason phrase
-                logResponseStatus(response);
-
-                Header[] headers = proxyResponse.getAllHeaders();
-                applyResponseHeaders(response, headers);
-                transferResponseEntity(response, proxyResponse);
-            }
-        }
-
-        private void transferResponseEntity(HttpResponse localResponse, HttpResponse remoteResponse)
-                throws IOException {
-            final HttpEntity remoteResponseEntity = remoteResponse.getEntity();
-            HttpEntity entity;
-            if (null == remoteResponseEntity) {
-                entity = emptyBodyEntity(remoteResponse);
-            } else {
-                entity = extractResponseBody(remoteResponseEntity);
-            }
-            EntityUtils.updateEntity(localResponse, entity);
-        }
-
-        private HttpEntity extractResponseBody(final HttpEntity remoteResponseEntity)
-                throws IOException {
-            ContentType contentType = ContentType.get(remoteResponseEntity);
-            byte[] rawContent = EntityUtils.toByteArray(remoteResponseEntity);
-            logResponseBody(contentType, rawContent);
-            return new ByteArrayEntity(rawContent, 0, rawContent.length, contentType);
-        }
-
-        private HttpEntity emptyBodyEntity(HttpResponse remoteResponse) {
-            BasicHttpEntity entity = new BasicHttpEntity();
-            Optional.ofNullable(remoteResponse.getFirstHeader("Content-Length"))
-                    .map(Header::getValue)
-                    .map(Long::parseLong)
-                    .ifPresent(cl -> entity.setContentLength(cl));
-            Header contentType = remoteResponse.getFirstHeader("Content-Type");
-            entity.setContentType(contentType);
-            return entity;
-        }
-
-        private void logResponseStatus(HttpResponse response) {
-            StatusLine statusLine = response.getStatusLine();
-            info("\tresponse: %d %s", statusLine.getStatusCode(), statusLine.getReasonPhrase());
-        }
-
-        private void logResponseBody(ContentType contentType, byte[] rawContent) {
-            if (null != contentType) {
-                info("\tContent-Type: %s", contentType);
-                if (contentType.getMimeType().startsWith("application/xml")
-                        || contentType.getMimeType().contains("json")) {
-                    info("\tcontent:\t%s", new String(rawContent));
-                }
-            }
-        }
-
-        private void logRequest(HttpRequest request, HttpUriRequest proxyRequest) {
-            info(
-                    "routing %s %s to %s %s",
-                    request.getRequestLine().getMethod(),
-                    request.getRequestLine().getUri(),
-                    proxyRequest.getRequestLine().getMethod(),
-                    proxyRequest.getRequestLine().getUri());
-
-            Stream.of(proxyRequest.getAllHeaders())
-                    .forEach(
-                            header ->
-                                    info(
-                                            "\tapplied request header %s: %s",
-                                            header.getName(), header.getValue()));
-        }
-
-        private void applyResponseHeaders(HttpResponse response, Header[] headers) {
-            if (null == headers || headers.length == 0) return;
-
-            Stream.of(headers)
-                    .forEach(
-                            header -> {
-                                String name = header.getName();
-                                String value = header.getValue();
-                                name = responseHeaderNameTransform.apply(name);
-                                if ("Connection".equalsIgnoreCase(name)
-                                        || "Transfer-Encoding".equalsIgnoreCase(name)
-                                        || "Content-Length".equalsIgnoreCase(name)) {
-                                    // these will produce a 'Connection reset by peer', let the
-                                    // proxy handle them
-                                    return;
-                                }
-                                // Fix the problematic response header names
-                                if ("etag".equalsIgnoreCase(name)) {
-                                    name = "ETag";
-                                } else if ("last-modified".equalsIgnoreCase(name)) {
-                                    name = "Last-Modified";
-                                } else if ("content-type".equalsIgnoreCase(name)) {
-                                    name = "Content-Type";
-                                }
-                                response.addHeader(name, value);
-                                info("\tapplied response header %s: %s", name, value);
-                            });
-        }
-
-        private HttpUriRequest proxify(HttpRequest request) {
-
-            RequestLine requestLine = request.getRequestLine();
-
-            String uri =
-                    requestLine
-                            .getUri()
-                            .replace(
-                                    "http://localhost:" + sourcePort,
-                                    "http://localhost:" + targetPort);
-
-            HttpUriRequest proxyRequest =
-                    RequestBuilder.copy(request)
-                            .setUri(uri)
-                            // these will produce a 'Connection reset by peer', let the
-                            // proxy handle them
-                            .removeHeaders("Connection")
-                            .removeHeaders("Transfer-Encoding")
-                            .removeHeaders("Content-Length")
-                            .build();
-            return proxyRequest;
-        }
-
-        @SuppressWarnings("PMD.SystemPrintln")
-        private void info(String msg, Object... params) {
-            if (debug) {
-                System.err.printf(msg + "%n", params);
-            }
-        }
-    }
-}
diff --git a/geowebcache/pom.xml b/geowebcache/pom.xml
index dc16021b3..2e50362d0 100644
--- a/geowebcache/pom.xml
+++ b/geowebcache/pom.xml
@@ -130,6 +130,15 @@
+
+            <dependency>
+                <groupId>com.azure</groupId>
+                <artifactId>azure-sdk-bom</artifactId>
+                <version>1.2.26</version>
+                <type>pom</type>
+                <scope>import</scope>
+            </dependency>
+
             <dependency>
                 <groupId>javax.servlet</groupId>
                 <artifactId>javax.servlet-api</artifactId>
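
Not part of the patch: the minimal sketch below illustrates how a client built with the
com.azure SDK that this change adopts can be pointed at a locally running Azurite endpoint,
such as the one AzuriteContainer starts, using the well-known devstoreaccount1 account and key
documented above, and exercises the same exists()/downloadContent()/deleteIfExists() call
patterns the migrated store and tests rely on. The blob endpoint (http://127.0.0.1:10000),
the container and blob names, and the AzuriteSmokeSketch class itself are assumptions made up
for the example.

import com.azure.core.util.BinaryData;
import com.azure.storage.blob.BlobClient;
import com.azure.storage.blob.BlobContainerClient;
import com.azure.storage.blob.BlobServiceClient;
import com.azure.storage.blob.BlobServiceClientBuilder;

public class AzuriteSmokeSketch {

    public static void main(String[] args) {
        // Azurite's well-known development account, the same values AzuriteContainer exposes
        // through getAccountName()/getAccountKey(); the endpoint assumes the default blobs port
        String connectionString =
                "DefaultEndpointsProtocol=http;"
                        + "AccountName=devstoreaccount1;"
                        + "AccountKey=Eby8vdM02xNOcqFlqUwJPLlmEtlCDXJ1OUzFT50uSRZ6IFsuFq2UVErCz4I6tq/K1SZFPTOtr/KBHBeksoGMGw==;"
                        + "BlobEndpoint=http://127.0.0.1:10000/devstoreaccount1;";

        BlobServiceClient service =
                new BlobServiceClientBuilder().connectionString(connectionString).buildClient();

        // container and blob names are arbitrary example values
        BlobContainerClient container = service.getBlobContainerClient("gwc-smoke-test");
        container.createIfNotExists();

        BlobClient blob = container.getBlobClient("metadata.properties");
        blob.upload(BinaryData.fromString("gwc=test"), true);

        // the synchronous call patterns used by the migrated code instead of the old
        // blockingGet()/statusCode() style
        System.out.println("exists:  " + blob.exists());
        System.out.println("content: " + blob.downloadContent());
        System.out.println("deleted: " + blob.deleteIfExists());
    }
}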
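A note on the geowebcache/pom.xml hunk above: importing com.azure:azure-sdk-bom with type pom
and scope import into the parent dependencyManagement is the standard Maven BOM pattern. The
BOM pins a mutually compatible set of com.azure artifact versions (core, storage clients and
their transport), which lets individual modules declare com.azure artifacts without version
elements of their own; the sketch above likewise only needs azure-storage-blob on the classpath
with no explicit version.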