rightsStr = new ArrayList<>();
- for (com.microsoft.azure.management.eventhub.AccessRights rights : resource.rights()) {
- rightsStr.add(rights.toString());
- }
- info.append("\n\tRights: ").append(rightsStr);
- System.out.println(info.toString());
- }
-
- /**
- * Print event hub namespace recovery pairing auth rule key.
- *
- * @param resource event hub namespace disaster recovery pairing auth rule key
- */
- public static void print(DisasterRecoveryPairingAuthorizationKey resource) {
- StringBuilder info = new StringBuilder();
- info.append("DisasterRecoveryPairing auth key: ")
- .append("\n\t Alias primary connection string: ").append(resource.aliasPrimaryConnectionString())
- .append("\n\t Alias secondary connection string: ").append(resource.aliasSecondaryConnectionString())
- .append("\n\t Primary key: ").append(resource.primaryKey())
- .append("\n\t Secondary key: ").append(resource.secondaryKey())
- .append("\n\t Primary connection string: ").append(resource.primaryConnectionString())
- .append("\n\t Secondary connection string: ").append(resource.secondaryConnectionString());
- System.out.println(info.toString());
- }
-
- /**
- * Print event hub consumer group.
- *
- * @param resource event hub consumer group
- */
- public static void print(EventHubConsumerGroup resource) {
- StringBuilder info = new StringBuilder();
- info.append("Event hub consumer group: ").append(resource.id())
- .append("\n\tName: ").append(resource.name())
- .append("\n\tNamespace resource group: ").append(resource.namespaceResourceGroupName())
- .append("\n\tNamespace: ").append(resource.namespaceName())
- .append("\n\tEvent hub name: ").append(resource.eventHubName())
- .append("\n\tUser metadata: ").append(resource.userMetadata());
- System.out.println(info.toString());
- }
-
- /**
- * Print Batch AI Cluster.
- *
- * @param resource batch ai cluster
- */
- public static void print(BatchAICluster resource) {
- StringBuilder info = new StringBuilder("Batch AI cluster: ")
- .append("\n\tId: ").append(resource.id())
- .append("\n\tName: ").append(resource.name())
- .append("\n\tResource group: ").append(resource.workspace().resourceGroupName())
- .append("\n\tRegion: ").append(resource.workspace().regionName())
- .append("\n\tVM Size: ").append(resource.vmSize())
- .append("\n\tVM Priority: ").append(resource.vmPriority())
- .append("\n\tSubnet: ").append(resource.subnet())
- .append("\n\tAllocation state: ").append(resource.allocationState())
- .append("\n\tAllocation state transition time: ").append(resource.allocationStateTransitionTime())
- .append("\n\tCreation time: ").append(resource.creationTime())
- .append("\n\tCurrent node count: ").append(resource.currentNodeCount())
- .append("\n\tAllocation state transition time: ").append(resource.allocationStateTransitionTime())
- .append("\n\tAllocation state transition time: ").append(resource.allocationStateTransitionTime());
- if (resource.scaleSettings().autoScale() != null) {
- info.append("\n\tAuto scale settings: ")
- .append("\n\t\tInitial node count: ").append(resource.scaleSettings().autoScale().initialNodeCount())
- .append("\n\t\tMinimum node count: ").append(resource.scaleSettings().autoScale().minimumNodeCount())
- .append("\n\t\tMaximum node count: ").append(resource.scaleSettings().autoScale().maximumNodeCount());
- }
- if (resource.scaleSettings().manual() != null) {
- info.append("\n\tManual scale settings: ")
- .append("\n\t\tTarget node count: ").append(resource.scaleSettings().manual().targetNodeCount())
- .append("\n\t\tDeallocation option: ")
- .append(resource.scaleSettings().manual().nodeDeallocationOption());
- }
- if (resource.nodeStateCounts() != null) {
- info.append("\n\tNode state counts: ")
- .append("\n\t\tRunning nodes count: ").append(resource.nodeStateCounts().runningNodeCount())
- .append("\n\t\tIdle nodes count: ").append(resource.nodeStateCounts().idleNodeCount())
- .append("\n\t\tPreparing nodes count: ").append(resource.nodeStateCounts().preparingNodeCount())
- .append("\n\t\tLeaving nodes count: ").append(resource.nodeStateCounts().leavingNodeCount())
- .append("\n\t\tPreparing nodes count: ").append(resource.nodeStateCounts().preparingNodeCount());
- }
- if (resource.virtualMachineConfiguration() != null && resource.virtualMachineConfiguration().imageReference() != null) {
- info.append("\n\tVirtual machine configuration: ")
- .append("\n\t\tPublisher: ").append(resource.virtualMachineConfiguration().imageReference().publisher())
- .append("\n\t\tOffer: ").append(resource.virtualMachineConfiguration().imageReference().offer())
- .append("\n\t\tSku: ").append(resource.virtualMachineConfiguration().imageReference().sku())
- .append("\n\t\tVersion: ").append(resource.virtualMachineConfiguration().imageReference().version());
- }
- if (resource.nodeSetup() != null && resource.nodeSetup().setupTask() != null) {
- info.append("\n\tSetup task: ")
- .append("\n\t\tCommand line: ").append(resource.nodeSetup().setupTask().commandLine())
- .append("\n\t\tStdout/err Path Prefix: ").append(resource.nodeSetup().setupTask().stdOutErrPathPrefix());
- }
- System.out.println(info.toString());
- }
-
- /**
- * Print Batch AI Job.
- *
- * @param resource batch ai job
- */
- public static void print(BatchAIJob resource) {
- StringBuilder info = new StringBuilder("Batch AI job: ")
- .append("\n\tId: ").append(resource.id())
- .append("\n\tName: ").append(resource.name())
- .append("\n\tCluster Id: ").append(resource.cluster())
- .append("\n\tCreation time: ").append(resource.creationTime())
- .append("\n\tNode count: ").append(resource.nodeCount())
- .append("\n\tPriority: ").append(resource.schedulingPriority())
- .append("\n\tExecution state: ").append(resource.executionState())
- .append("\n\tExecution state transition time: ").append(resource.executionStateTransitionTime())
- .append("\n\tTool type: ").append(resource.toolType())
- .append("\n\tExperiment name: ").append(resource.experiment().name());
- if (resource.mountVolumes() != null) {
- info.append("\n\tMount volumes:");
- if (resource.mountVolumes().azureFileShares() != null) {
- info.append("\n\t\tAzure fileshares:");
- for (AzureFileShareReference share : resource.mountVolumes().azureFileShares()) {
- info.append("\n\t\t\tAccount name:").append(share.accountName())
- .append("\n\t\t\tFile Url:").append(share.azureFileUrl())
- .append("\n\t\t\tDirectory mode:").append(share.directoryMode())
- .append("\n\t\t\tFile mode:").append(share.fileMode())
- .append("\n\t\t\tRelative mount path:").append(share.relativeMountPath());
- }
- }
- }
- System.out.println(info.toString());
- }
/**
* Print Diagnostic Setting.
@@ -3125,32 +3194,209 @@ public static void print(MetricAlert metricAlert) {
}
System.out.println(info.toString());
}
- private static OkHttpClient httpClient;
- /**
- * Ensure the HTTP client is valid.
+// /**
+// * Print spring service settings.
+// *
+// * @param springService spring service instance
+// */
+// public static void print(SpringService springService) {
+// StringBuilder info = new StringBuilder("Spring Service: ")
+// .append("\n\tId: ").append(springService.id())
+// .append("\n\tName: ").append(springService.name())
+// .append("\n\tResource Group: ").append(springService.resourceGroupName())
+// .append("\n\tRegion: ").append(springService.region())
+// .append("\n\tTags: ").append(springService.tags());
+//
+// ConfigServerProperties serverProperties = springService.getServerProperties();
+// if (serverProperties != null && serverProperties.provisioningState() != null
+// && serverProperties.provisioningState().equals(ConfigServerState.SUCCEEDED) && serverProperties.configServer() != null) {
+// info.append("\n\tProperties: ");
+// if (serverProperties.configServer().gitProperty() != null) {
+// info.append("\n\t\tGit: ").append(serverProperties.configServer().gitProperty().uri());
+// }
+// }
+//
+// if (springService.sku() != null) {
+// info.append("\n\tSku: ")
+// .append("\n\t\tName: ").append(springService.sku().name())
+// .append("\n\t\tTier: ").append(springService.sku().tier())
+// .append("\n\t\tCapacity: ").append(springService.sku().capacity());
+// }
+//
+// MonitoringSettingProperties monitoringSettingProperties = springService.getMonitoringSetting();
+// if (monitoringSettingProperties != null && monitoringSettingProperties.provisioningState() != null
+// && monitoringSettingProperties.provisioningState().equals(MonitoringSettingState.SUCCEEDED)) {
+// info.append("\n\tTrace: ")
+// .append("\n\t\tEnabled: ").append(monitoringSettingProperties.traceEnabled())
+// .append("\n\t\tApp Insight Instrumentation Key: ").append(monitoringSettingProperties.appInsightsInstrumentationKey());
+// }
+//
+// System.out.println(info.toString());
+// }
+//
+// /**
+// * Print spring app settings.
+// *
+// * @param springApp spring app instance
+// */
+// public static void print(SpringApp springApp) {
+// StringBuilder info = new StringBuilder("Spring Service: ")
+// .append("\n\tId: ").append(springApp.id())
+// .append("\n\tName: ").append(springApp.name())
+// .append("\n\tCreated Time: ").append(springApp.createdTime())
+// .append("\n\tPublic Endpoint: ").append(springApp.isPublic())
+// .append("\n\tUrl: ").append(springApp.url())
+// .append("\n\tHttps Only: ").append(springApp.isHttpsOnly())
+// .append("\n\tFully Qualified Domain Name: ").append(springApp.fqdn())
+// .append("\n\tActive Deployment Name: ").append(springApp.activeDeploymentName());
+//
+// if (springApp.temporaryDisk() != null) {
+// info.append("\n\tTemporary Disk:")
+// .append("\n\t\tSize In GB: ").append(springApp.temporaryDisk().sizeInGB())
+// .append("\n\t\tMount Path: ").append(springApp.temporaryDisk().mountPath());
+// }
+//
+// if (springApp.persistentDisk() != null) {
+// info.append("\n\tPersistent Disk:")
+// .append("\n\t\tSize In GB: ").append(springApp.persistentDisk().sizeInGB())
+// .append("\n\t\tMount Path: ").append(springApp.persistentDisk().mountPath());
+// }
+//
+// if (springApp.identity() != null) {
+// info.append("\n\tIdentity:")
+// .append("\n\t\tType: ").append(springApp.identity().type())
+// .append("\n\t\tPrincipal Id: ").append(springApp.identity().principalId())
+// .append("\n\t\tTenant Id: ").append(springApp.identity().tenantId());
+// }
+//
+// System.out.println(info.toString());
+// }
+
+ /**
+ * Sends a GET request to target URL.
+ *
+ * Retry logic tuned for AppService.
+ * The method does not handle 301 redirect.
*
+ * @param urlString the target URL.
+ * @return Content of the HTTP response.
*/
- private static OkHttpClient ensureValidHttpClient() {
- if (httpClient == null) {
- httpClient = new OkHttpClient.Builder().readTimeout(1, TimeUnit.MINUTES).build();
- }
+ public static String sendGetRequest(String urlString) {
+ ClientLogger logger = new ClientLogger(Utils.class);
- return httpClient;
+ try {
+            Mono<Response<Flux<ByteBuffer>>> response =
+ HTTP_CLIENT.getString(getHost(urlString), getPathAndQuery(urlString))
+ .retryWhen(Retry
+ .fixedDelay(5, Duration.ofSeconds(30))
+ .filter(t -> {
+ boolean retry = false;
+ if (t instanceof TimeoutException) {
+ retry = true;
+ } else if (t instanceof HttpResponseException
+ && ((HttpResponseException) t).getResponse().getStatusCode() == 503) {
+ retry = true;
+ }
+
+ if (retry) {
+ logger.info("retry GET request to {}", urlString);
+ }
+ return retry;
+ }));
+            Response<String> ret = stringResponse(response).block();
+ return ret == null ? null : ret.getValue();
+ } catch (MalformedURLException e) {
+ logger.logThrowableAsError(e);
+ return null;
+ }
}
/**
- * Connect to a specified URL using "curl" like HTTP GET client.
+ * Sends a POST request to target URL.
+ *
+ * Retry logic tuned for AppService.
*
- * @param url URL to be tested
- * @return the HTTP GET response content
- */
- public static String curl(String url) {
- Request request = new Request.Builder().url(url).get().build();
+ * @param urlString the target URL.
+ * @param body the request body.
+ * @return Content of the HTTP response.
+ * */
+ public static String sendPostRequest(String urlString, String body) {
+ ClientLogger logger = new ClientLogger(Utils.class);
+
try {
- return ensureValidHttpClient().newCall(request).execute().body().string();
- } catch (IOException e) {
+            Mono<Response<String>> response =
+ stringResponse(HTTP_CLIENT.postString(getHost(urlString), getPathAndQuery(urlString), body))
+ .retryWhen(Retry
+ .fixedDelay(5, Duration.ofSeconds(30))
+ .filter(t -> {
+ boolean retry = false;
+ if (t instanceof TimeoutException) {
+ retry = true;
+ }
+
+ if (retry) {
+ logger.info("retry POST request to {}", urlString);
+ }
+ return retry;
+ }));
+            Response<String> ret = response.block();
+ return ret == null ? null : ret.getValue();
+ } catch (Exception e) {
+ logger.logThrowableAsError(e);
return null;
}
}
+
+    private static Mono<Response<String>> stringResponse(Mono<Response<Flux<ByteBuffer>>> responseMono) {
+ return responseMono.flatMap(response -> FluxUtil.collectBytesInByteBufferStream(response.getValue())
+ .map(bytes -> new String(bytes, StandardCharsets.UTF_8))
+ .map(str -> new SimpleResponse<>(response.getRequest(), response.getStatusCode(), response.getHeaders(), str)));
+ }
+
+ private static String getHost(String urlString) throws MalformedURLException {
+ URL url = new URL(urlString);
+ String protocol = url.getProtocol();
+ String host = url.getAuthority();
+ return protocol + "://" + host;
+ }
+
+ private static String getPathAndQuery(String urlString) throws MalformedURLException {
+ URL url = new URL(urlString);
+ String path = url.getPath();
+ String query = url.getQuery();
+ if (query != null && !query.isEmpty()) {
+ path = path + "?" + query;
+ }
+ return path;
+ }
+
+ private static final WebAppTestClient HTTP_CLIENT = RestProxy.create(
+ WebAppTestClient.class,
+ new HttpPipelineBuilder()
+ .policies(
+ new HttpLoggingPolicy(new HttpLogOptions().setLogLevel(HttpLogDetailLevel.BASIC)),
+ new RetryPolicy("Retry-After", ChronoUnit.SECONDS))
+ .build());
+
+ @Host("{$host}")
+ @ServiceInterface(name = "WebAppTestClient")
+ private interface WebAppTestClient {
+ @Get("{path}")
+ @ExpectedResponses({200, 400, 404})
+        Mono<Response<Flux<ByteBuffer>>> getString(@HostParam("$host") String host, @PathParam(value = "path", encoded = true) String path);
+
+ @Post("{path}")
+ @ExpectedResponses({200, 400, 404})
+        Mono<Response<Flux<ByteBuffer>>> postString(@HostParam("$host") String host, @PathParam(value = "path", encoded = true) String path, @BodyParam("text/plain") String body);
+ }
+
+    public static int getSize(Iterable<?> iterable) {
+        // Counts the elements of the iterable.
+        int res = 0;
+        for (Object ignored : iterable) {
+            res++;
+        }
+        return res;
+    }
}
diff --git a/src/main/java/com/microsoft/azure/management/monitor/samples/QueryMetricsAndActivityLogs.java b/src/main/java/com/microsoft/azure/management/monitor/samples/QueryMetricsAndActivityLogs.java
deleted file mode 100644
index 7babad4..0000000
--- a/src/main/java/com/microsoft/azure/management/monitor/samples/QueryMetricsAndActivityLogs.java
+++ /dev/null
@@ -1,252 +0,0 @@
-/**
- * Copyright (c) Microsoft Corporation. All rights reserved.
- * Licensed under the MIT License. See License.txt in the project root for
- * license information.
- */
-
-package com.microsoft.azure.management.monitor.samples;
-
-import com.microsoft.azure.PagedList;
-import com.microsoft.azure.management.Azure;
-import com.microsoft.azure.management.monitor.EventData;
-import com.microsoft.azure.management.monitor.MetadataValue;
-import com.microsoft.azure.management.monitor.Metric;
-import com.microsoft.azure.management.monitor.MetricCollection;
-import com.microsoft.azure.management.monitor.MetricDefinition;
-import com.microsoft.azure.management.monitor.MetricValue;
-import com.microsoft.azure.management.monitor.TimeSeriesElement;
-import com.microsoft.azure.management.resources.fluentcore.arm.Region;
-import com.microsoft.azure.management.resources.fluentcore.utils.SdkContext;
-import com.microsoft.azure.management.samples.Utils;
-import com.microsoft.azure.management.storage.AccessTier;
-import com.microsoft.azure.management.storage.StorageAccount;
-import com.microsoft.azure.management.storage.StorageAccountKey;
-import com.microsoft.azure.storage.CloudStorageAccount;
-import com.microsoft.azure.storage.LoggingOperations;
-import com.microsoft.azure.storage.LoggingProperties;
-import com.microsoft.azure.storage.MetricsLevel;
-import com.microsoft.azure.storage.MetricsProperties;
-import com.microsoft.azure.storage.ServiceProperties;
-import com.microsoft.azure.storage.StorageException;
-import com.microsoft.azure.storage.blob.CloudBlobClient;
-import com.microsoft.azure.storage.blob.CloudBlobContainer;
-import com.microsoft.azure.storage.blob.CloudBlockBlob;
-import com.microsoft.rest.LogLevel;
-import org.joda.time.DateTime;
-import org.joda.time.Period;
-
-import java.io.ByteArrayOutputStream;
-import java.io.File;
-import java.io.IOException;
-import java.io.InputStream;
-import java.net.URISyntaxException;
-import java.security.InvalidKeyException;
-import java.util.EnumSet;
-import java.util.List;
-
-/**
- * This sample shows examples of retrieving metrics and activity logs for Storage Account.
- * - List all metric definitions available for a storage account
- * - Retrieve and show metrics for the past 7 days for Transactions where
- * - Api name was 'PutBlob' and
- * - response type was 'Success' and
- * - Geo type was 'Primary'
- * - Retrieve and show all activity logs for the past 7 days for the same Storage account.
- */
-public final class QueryMetricsAndActivityLogs {
-
- /**
- * Main function which runs the actual sample.
- * @param azure instance of the azure client
- * @return true if sample runs successfully
- */
- public static boolean runSample(Azure azure) {
- final String storageAccountName = Utils.createRandomName("saMonitor");
- final String rgName = Utils.createRandomName("rgMonitor");
-
- try {
- // ============================================================
- // Create a storage account
-
- System.out.println("Creating a Storage Account");
-
- StorageAccount storageAccount = azure.storageAccounts().define(storageAccountName)
- .withRegion(Region.US_EAST)
- .withNewResourceGroup(rgName)
- .withBlobStorageAccountKind()
- .withAccessTier(AccessTier.COOL)
- .create();
-
- System.out.println("Created a Storage Account:");
- Utils.print(storageAccount);
-
-            List<StorageAccountKey> storageAccountKeys = storageAccount.getKeys();
- final String storageConnectionString = String.format("DefaultEndpointsProtocol=http;AccountName=%s;AccountKey=%s",
- storageAccount.name(),
- storageAccountKeys.get(0).value());
-
- // Add some blob transaction events
- addBlobTransactions(storageConnectionString);
-
- DateTime recordDateTime = DateTime.now();
- // get metric definitions for storage account.
- for (MetricDefinition metricDefinition : azure.metricDefinitions().listByResource(storageAccount.id())) {
- // find metric definition for Transactions
- if (metricDefinition.name().localizedValue().equalsIgnoreCase("transactions")) {
- // get metric records
- MetricCollection metricCollection = metricDefinition.defineQuery()
- .startingFrom(recordDateTime.minusDays(7))
- .endsBefore(recordDateTime)
- .withAggregation("Average")
- .withInterval(Period.minutes(5))
- .withOdataFilter("apiName eq 'PutBlob' and responseType eq 'Success' and geoType eq 'Primary'")
- .execute();
-
- System.out.println("Metrics for '" + storageAccount.id() + "':");
- System.out.println("Namespacse: " + metricCollection.namespace());
- System.out.println("Query time: " + metricCollection.timespan());
- System.out.println("Time Grain: " + metricCollection.interval());
- System.out.println("Cost: " + metricCollection.cost());
-
- for (Metric metric : metricCollection.metrics()) {
- System.out.println("\tMetric: " + metric.name().localizedValue());
- System.out.println("\tType: " + metric.type());
- System.out.println("\tUnit: " + metric.unit());
- System.out.println("\tTime Series: ");
- for (TimeSeriesElement timeElement : metric.timeseries()) {
- System.out.println("\t\tMetadata: ");
- for (MetadataValue metadata : timeElement.metadatavalues()) {
- System.out.println("\t\t\t" + metadata.name().localizedValue() + ": " + metadata.value());
- }
- System.out.println("\t\tData: ");
- for (MetricValue data : timeElement.data()) {
- System.out.println("\t\t\t" + data.timeStamp()
- + " : (Min) " + data.minimum()
- + " : (Max) " + data.maximum()
- + " : (Avg) " + data.average()
- + " : (Total) " + data.total()
- + " : (Count) " + data.count());
- }
- }
- }
- break;
- }
- }
-
- // get activity logs for the same period.
-            PagedList<EventData> logs = azure.activityLogs().defineQuery()
- .startingFrom(recordDateTime.minusDays(7))
- .endsBefore(recordDateTime)
- .withAllPropertiesInResponse()
- .filterByResource(storageAccount.id())
- .execute();
-
- System.out.println("Activity logs for the Storage Account:");
-
- for (EventData event : logs) {
- if (event.eventName() != null) {
- System.out.println("\tEvent: " + event.eventName().localizedValue());
- }
- if (event.operationName() != null) {
- System.out.println("\tOperation: " + event.operationName().localizedValue());
- }
- System.out.println("\tCaller: " + event.caller());
- System.out.println("\tCorrelationId: " + event.correlationId());
- System.out.println("\tSubscriptionId: " + event.subscriptionId());
- }
-
- return true;
- } catch (Exception f) {
- System.out.println(f.getMessage());
- f.printStackTrace();
- } finally {
- if (azure.resourceGroups().getByName(rgName) != null) {
- System.out.println("Deleting Resource Group: " + rgName);
- azure.resourceGroups().deleteByName(rgName);
- System.out.println("Deleted Resource Group: " + rgName);
- } else {
- System.out.println("Did not create any resources in Azure. No clean up is necessary");
- }
- }
- return false;
- }
-
- /**
- * Main entry point.
- * @param args the parameters
- */
- public static void main(String[] args) {
- try {
-
- final File credFile = new File(System.getenv("AZURE_AUTH_LOCATION"));
-
- Azure azure = Azure.configure()
- .withLogLevel(LogLevel.NONE)
- .authenticate(credFile)
- .withDefaultSubscription();
-
- // Print selected subscription
- System.out.println("Selected subscription: " + azure.subscriptionId());
-
- runSample(azure);
- } catch (Exception e) {
- System.out.println(e.getMessage());
- e.printStackTrace();
- }
- }
-
- private static void addBlobTransactions(String storageConnectionString) throws IOException, URISyntaxException, InvalidKeyException, StorageException {
- // Get the script to upload
- //
- InputStream scriptFileAsStream = QueryMetricsAndActivityLogs
- .class
- .getResourceAsStream("/install_apache.sh");
-
- // Get the size of the stream
- //
- int fileSize;
- ByteArrayOutputStream outputStream = new ByteArrayOutputStream();
- byte[] buffer = new byte[256];
- int bytesRead;
- while ((bytesRead = scriptFileAsStream.read(buffer)) != -1) {
- outputStream.write(buffer, 0, bytesRead);
- }
- fileSize = outputStream.size();
- outputStream.close();
-
- // Upload the script file as block blob
- //
- CloudStorageAccount account = CloudStorageAccount.parse(storageConnectionString);
- CloudBlobClient cloudBlobClient = account.createCloudBlobClient();
- CloudBlobContainer container = cloudBlobClient.getContainerReference("scripts");
- container.createIfNotExists();
-
- ServiceProperties serviceProps = cloudBlobClient.downloadServiceProperties();
-
- // configure Storage logging and metrics
- LoggingProperties logProps = new LoggingProperties();
- logProps.setLogOperationTypes(EnumSet.of(LoggingOperations.READ, LoggingOperations.WRITE));
- logProps.setRetentionIntervalInDays(2);
- logProps.setVersion("1.0");
- serviceProps.setLogging(logProps);
-
- MetricsProperties metricProps = new MetricsProperties();
- metricProps.setMetricsLevel(MetricsLevel.SERVICE_AND_API);
- metricProps.setRetentionIntervalInDays(2);
- metricProps.setVersion("1.0");
- serviceProps.setHourMetrics(metricProps);
- serviceProps.setMinuteMetrics(metricProps);
-
- // Set the default service version to be used for anonymous requests.
- serviceProps.setDefaultServiceVersion("2015-04-05");
-
- // Set the service properties.
- cloudBlobClient.uploadServiceProperties(serviceProps);
-
- CloudBlockBlob blob = container.getBlockBlobReference("install_apache.sh");
- blob.upload(scriptFileAsStream, fileSize);
-
- // give sometime for the infrastructure to process the records and fit into time grain.
- SdkContext.sleep(6 * 60000);
- }
-}
diff --git a/src/main/resources/install_apache.sh b/src/main/resources/install_apache.sh
new file mode 100644
index 0000000..c2ec797
--- /dev/null
+++ b/src/main/resources/install_apache.sh
@@ -0,0 +1,9 @@
+#!/bin/bash
+
+sudo apt-get update
+
+# install apache
+sudo apt-get -y install apache2
+
+# restart Apache
+sudo apachectl restart
\ No newline at end of file