diff --git a/dd-sdk-android-core/api/apiSurface b/dd-sdk-android-core/api/apiSurface index 0334c02dd7..40a4510548 100644 --- a/dd-sdk-android-core/api/apiSurface +++ b/dd-sdk-android-core/api/apiSurface @@ -224,6 +224,8 @@ enum com.datadog.android.core.configuration.UploadFrequency - FREQUENT - AVERAGE - RARE +interface com.datadog.android.core.configuration.UploadSchedulerStrategy + fun getMsDelayUntilNextUpload(String, Int, Int?, Throwable?): Long interface com.datadog.android.core.constraints.DataConstraints fun validateAttributes(Map, String? = null, String? = null, Set = emptySet()): MutableMap fun validateTags(List): List diff --git a/dd-sdk-android-core/api/dd-sdk-android-core.api b/dd-sdk-android-core/api/dd-sdk-android-core.api index f3900d127c..5163bb7519 100644 --- a/dd-sdk-android-core/api/dd-sdk-android-core.api +++ b/dd-sdk-android-core/api/dd-sdk-android-core.api @@ -608,6 +608,10 @@ public final class com/datadog/android/core/configuration/UploadFrequency : java public static fun values ()[Lcom/datadog/android/core/configuration/UploadFrequency; } +public abstract interface class com/datadog/android/core/configuration/UploadSchedulerStrategy { + public abstract fun getMsDelayUntilNextUpload (Ljava/lang/String;ILjava/lang/Integer;Ljava/lang/Throwable;)J +} + public abstract interface class com/datadog/android/core/constraints/DataConstraints { public abstract fun validateAttributes (Ljava/util/Map;Ljava/lang/String;Ljava/lang/String;Ljava/util/Set;)Ljava/util/Map; public abstract fun validateTags (Ljava/util/List;)Ljava/util/List; diff --git a/dd-sdk-android-core/src/main/kotlin/com/datadog/android/core/configuration/UploadSchedulerStrategy.kt b/dd-sdk-android-core/src/main/kotlin/com/datadog/android/core/configuration/UploadSchedulerStrategy.kt new file mode 100644 index 0000000000..c993595a83 --- /dev/null +++ b/dd-sdk-android-core/src/main/kotlin/com/datadog/android/core/configuration/UploadSchedulerStrategy.kt @@ -0,0 +1,34 @@ +/* + * Unless 
explicitly stated otherwise all files in this repository are licensed under the Apache License Version 2.0. + * This product includes software developed at Datadog (https://www.datadoghq.com/). + * Copyright 2016-Present Datadog, Inc. + */ + +package com.datadog.android.core.configuration + +import com.datadog.android.api.feature.Feature + +/** + * Defines the strategy used to schedule the waiting period between batch uploads. + */ +interface UploadSchedulerStrategy { + + /** + * Should return the delay in milliseconds to wait until the next upload attempt + * is performed. + * @param featureName the name of the feature for which a new upload will be scheduled. Known feature names are + * listed in the [Feature.Companion] object. + * @param uploadAttempts the number of requests that were attempted during the last upload batch. Will be zero if + * the device is not ready (e.g.: when offline or with low battery) or no data is ready to be sent. + * If multiple batches can be uploaded, the attempts will stop at the first failure. + * @param lastStatusCode the HTTP status code of the last request (if available). A successful upload will have a + * status code 202 (Accepted). When null, it means that the network request didn't fully complete. + * @param throwable the exception thrown during the upload process (if any). + */ + fun getMsDelayUntilNextUpload( + featureName: String, + uploadAttempts: Int, + lastStatusCode: Int?, + throwable: Throwable? 
+ ): Long +} diff --git a/dd-sdk-android-core/src/main/kotlin/com/datadog/android/core/internal/SdkFeature.kt b/dd-sdk-android-core/src/main/kotlin/com/datadog/android/core/internal/SdkFeature.kt index 8f56384073..20959bd28b 100644 --- a/dd-sdk-android-core/src/main/kotlin/com/datadog/android/core/internal/SdkFeature.kt +++ b/dd-sdk-android-core/src/main/kotlin/com/datadog/android/core/internal/SdkFeature.kt @@ -21,11 +21,13 @@ import com.datadog.android.api.net.RequestFactory import com.datadog.android.api.storage.EventBatchWriter import com.datadog.android.api.storage.FeatureStorageConfiguration import com.datadog.android.api.storage.datastore.DataStoreHandler +import com.datadog.android.core.configuration.UploadSchedulerStrategy import com.datadog.android.core.internal.configuration.DataUploadConfiguration import com.datadog.android.core.internal.data.upload.DataFlusher import com.datadog.android.core.internal.data.upload.DataOkHttpUploader import com.datadog.android.core.internal.data.upload.DataUploadScheduler import com.datadog.android.core.internal.data.upload.DataUploader +import com.datadog.android.core.internal.data.upload.DefaultUploadSchedulerStrategy import com.datadog.android.core.internal.data.upload.NoOpDataUploader import com.datadog.android.core.internal.data.upload.NoOpUploadScheduler import com.datadog.android.core.internal.data.upload.UploadScheduler @@ -88,14 +90,15 @@ internal class SdkFeature( return } - var dataUploadConfiguration: DataUploadConfiguration? 
= null if (wrappedFeature is StorageBackedFeature) { val uploadFrequency = coreFeature.uploadFrequency val batchProcessingLevel = coreFeature.batchProcessingLevel - dataUploadConfiguration = DataUploadConfiguration( + + val dataUploadConfiguration = DataUploadConfiguration( uploadFrequency, batchProcessingLevel.maxBatchesPerUploadJob ) + val uploadSchedulerStrategy = DefaultUploadSchedulerStrategy(dataUploadConfiguration) storage = prepareStorage( dataUploadConfiguration, wrappedFeature, @@ -103,12 +106,12 @@ internal class SdkFeature( instanceId, coreFeature.persistenceStrategyFactory ) - } - wrappedFeature.onInitialize(context) + wrappedFeature.onInitialize(context) - if (wrappedFeature is StorageBackedFeature && dataUploadConfiguration != null) { - setupUploader(wrappedFeature, dataUploadConfiguration) + setupUploader(wrappedFeature, uploadSchedulerStrategy, dataUploadConfiguration.maxBatchesPerUploadJob) + } else { + wrappedFeature.onInitialize(context) } if (wrappedFeature is TrackingConsentProviderCallback) { @@ -229,7 +232,7 @@ internal class SdkFeature( // region Internal private fun setupMetricsDispatcher( - dataUploadConfiguration: DataUploadConfiguration, + dataUploadConfiguration: DataUploadConfiguration?, filePersistenceConfig: FilePersistenceConfig, context: Context ) { @@ -251,7 +254,8 @@ internal class SdkFeature( private fun setupUploader( feature: StorageBackedFeature, - uploadConfiguration: DataUploadConfiguration + uploadSchedulerStrategy: UploadSchedulerStrategy, + maxBatchesPerJob: Int ) { uploadScheduler = if (coreFeature.isMainProcess) { uploader = createUploader(feature.requestFactory) @@ -262,7 +266,8 @@ internal class SdkFeature( coreFeature.contextProvider, coreFeature.networkInfoProvider, coreFeature.systemInfoProvider, - uploadConfiguration, + uploadSchedulerStrategy, + maxBatchesPerJob, coreFeature.uploadExecutorService, internalLogger ) @@ -274,7 +279,7 @@ internal class SdkFeature( // region Feature setup private fun prepareStorage( 
- dataUploadConfiguration: DataUploadConfiguration, + dataUploadConfiguration: DataUploadConfiguration?, wrappedFeature: StorageBackedFeature, context: Context, instanceId: String, diff --git a/dd-sdk-android-core/src/main/kotlin/com/datadog/android/core/internal/data/upload/DataUploadRunnable.kt b/dd-sdk-android-core/src/main/kotlin/com/datadog/android/core/internal/data/upload/DataUploadRunnable.kt index 2b35ed043d..36033e1a55 100644 --- a/dd-sdk-android-core/src/main/kotlin/com/datadog/android/core/internal/data/upload/DataUploadRunnable.kt +++ b/dd-sdk-android-core/src/main/kotlin/com/datadog/android/core/internal/data/upload/DataUploadRunnable.kt @@ -11,8 +11,8 @@ import com.datadog.android.api.InternalLogger import com.datadog.android.api.context.DatadogContext import com.datadog.android.api.context.NetworkInfo import com.datadog.android.api.storage.RawBatchEvent +import com.datadog.android.core.configuration.UploadSchedulerStrategy import com.datadog.android.core.internal.ContextProvider -import com.datadog.android.core.internal.configuration.DataUploadConfiguration import com.datadog.android.core.internal.metrics.RemovalReason import com.datadog.android.core.internal.net.info.NetworkInfoProvider import com.datadog.android.core.internal.persistence.BatchId @@ -21,9 +21,6 @@ import com.datadog.android.core.internal.system.SystemInfoProvider import com.datadog.android.core.internal.utils.scheduleSafe import java.util.concurrent.ScheduledThreadPoolExecutor import java.util.concurrent.TimeUnit -import kotlin.math.max -import kotlin.math.min -import kotlin.math.roundToLong internal class DataUploadRunnable( private val featureName: String, @@ -33,53 +30,44 @@ internal class DataUploadRunnable( private val contextProvider: ContextProvider, private val networkInfoProvider: NetworkInfoProvider, private val systemInfoProvider: SystemInfoProvider, - uploadConfiguration: DataUploadConfiguration, + internal val uploadSchedulerStrategy: UploadSchedulerStrategy, + 
internal val maxBatchesPerJob: Int, private val internalLogger: InternalLogger ) : UploadRunnable { - internal var currentDelayIntervalMs = uploadConfiguration.defaultDelayMs - internal val minDelayMs = uploadConfiguration.minDelayMs - internal val maxDelayMs = uploadConfiguration.maxDelayMs - internal val maxBatchesPerJob = uploadConfiguration.maxBatchesPerUploadJob - // region Runnable @WorkerThread override fun run() { + var uploadAttempts = 0 + var lastBatchUploadStatus: UploadStatus? = null if (isNetworkAvailable() && isSystemReady()) { val context = contextProvider.context var batchConsumerAvailableAttempts = maxBatchesPerJob - var lastBatchUploadStatus: UploadStatus? do { batchConsumerAvailableAttempts-- lastBatchUploadStatus = handleNextBatch(context) - } while (batchConsumerAvailableAttempts > 0 && - lastBatchUploadStatus is UploadStatus.Success + if (lastBatchUploadStatus != null) { + uploadAttempts++ + } + } while ( + batchConsumerAvailableAttempts > 0 && lastBatchUploadStatus is UploadStatus.Success ) - if (lastBatchUploadStatus != null) { - handleBatchConsumingJobFrequency(lastBatchUploadStatus) - } else { - // there was no batch left or there was a problem reading the next batch - // in the storage so we increase the interval - increaseInterval(lastBatchUploadStatus) - } } - scheduleNextUpload() + val delayMs = uploadSchedulerStrategy.getMsDelayUntilNextUpload( + featureName, + uploadAttempts, + lastBatchUploadStatus?.code, + lastBatchUploadStatus?.throwable + ) + scheduleNextUpload(delayMs) } // endregion // region Internal - private fun handleBatchConsumingJobFrequency(lastBatchUploadStatus: UploadStatus) { - if (lastBatchUploadStatus.shouldRetry) { - increaseInterval(lastBatchUploadStatus) - } else { - decreaseInterval() - } - } - @WorkerThread @Suppress("UnsafeThirdPartyFunctionCall") // called inside a dedicated executor private fun handleNextBatch(context: DatadogContext): UploadStatus? 
{ @@ -109,11 +97,11 @@ internal class DataUploadRunnable( return hasEnoughPower && !systemInfo.powerSaveMode } - private fun scheduleNextUpload() { + private fun scheduleNextUpload(delayMs: Long) { threadPoolExecutor.remove(this) threadPoolExecutor.scheduleSafe( "$featureName: data upload", - currentDelayIntervalMs, + delayMs, TimeUnit.MILLISECONDS, internalLogger, this @@ -137,37 +125,9 @@ internal class DataUploadRunnable( return status } - @Suppress("UnsafeThirdPartyFunctionCall") // rounded Double isn't NaN - private fun decreaseInterval() { - currentDelayIntervalMs = max( - minDelayMs, - @Suppress("UnsafeThirdPartyFunctionCall") // not a NaN - (currentDelayIntervalMs * DECREASE_PERCENT).roundToLong() - ) - } - - @Suppress("UnsafeThirdPartyFunctionCall") // rounded Double isn't NaN - private fun increaseInterval(status: UploadStatus?) { - currentDelayIntervalMs = if (status is UploadStatus.DNSError) { - // A DNS error will likely not be a fluke, so we use a longer delay to avoid infinite looping - // and prevent battery draining - maxDelayMs * DNS_DELAY_MULTIPLIER - } else { - min( - maxDelayMs, - @Suppress("UnsafeThirdPartyFunctionCall") // not a NaN - (currentDelayIntervalMs * INCREASE_PERCENT).roundToLong() - ) - } - } - // endregion companion object { internal const val LOW_BATTERY_THRESHOLD = 10 - const val DECREASE_PERCENT = 0.90 - const val INCREASE_PERCENT = 1.10 - - internal const val DNS_DELAY_MULTIPLIER = 100 } } diff --git a/dd-sdk-android-core/src/main/kotlin/com/datadog/android/core/internal/data/upload/DataUploadScheduler.kt b/dd-sdk-android-core/src/main/kotlin/com/datadog/android/core/internal/data/upload/DataUploadScheduler.kt index 3008243bbd..4b2fc70fdd 100644 --- a/dd-sdk-android-core/src/main/kotlin/com/datadog/android/core/internal/data/upload/DataUploadScheduler.kt +++ b/dd-sdk-android-core/src/main/kotlin/com/datadog/android/core/internal/data/upload/DataUploadScheduler.kt @@ -7,8 +7,8 @@ package 
com.datadog.android.core.internal.data.upload import com.datadog.android.api.InternalLogger +import com.datadog.android.core.configuration.UploadSchedulerStrategy import com.datadog.android.core.internal.ContextProvider -import com.datadog.android.core.internal.configuration.DataUploadConfiguration import com.datadog.android.core.internal.net.info.NetworkInfoProvider import com.datadog.android.core.internal.persistence.Storage import com.datadog.android.core.internal.system.SystemInfoProvider @@ -22,20 +22,22 @@ internal class DataUploadScheduler( contextProvider: ContextProvider, networkInfoProvider: NetworkInfoProvider, systemInfoProvider: SystemInfoProvider, - uploadConfiguration: DataUploadConfiguration, + uploadSchedulerStrategy: UploadSchedulerStrategy, + maxBatchesPerJob: Int, private val scheduledThreadPoolExecutor: ScheduledThreadPoolExecutor, private val internalLogger: InternalLogger ) : UploadScheduler { internal val runnable = DataUploadRunnable( - featureName, - scheduledThreadPoolExecutor, - storage, - dataUploader, - contextProvider, - networkInfoProvider, - systemInfoProvider, - uploadConfiguration, + featureName = featureName, + threadPoolExecutor = scheduledThreadPoolExecutor, + storage = storage, + dataUploader = dataUploader, + contextProvider = contextProvider, + networkInfoProvider = networkInfoProvider, + systemInfoProvider = systemInfoProvider, + uploadSchedulerStrategy = uploadSchedulerStrategy, + maxBatchesPerJob = maxBatchesPerJob, internalLogger = internalLogger ) diff --git a/dd-sdk-android-core/src/main/kotlin/com/datadog/android/core/internal/data/upload/DefaultUploadSchedulerStrategy.kt b/dd-sdk-android-core/src/main/kotlin/com/datadog/android/core/internal/data/upload/DefaultUploadSchedulerStrategy.kt new file mode 100644 index 0000000000..7dcb062671 --- /dev/null +++ b/dd-sdk-android-core/src/main/kotlin/com/datadog/android/core/internal/data/upload/DefaultUploadSchedulerStrategy.kt @@ -0,0 +1,74 @@ +/* + * Unless explicitly 
stated otherwise all files in this repository are licensed under the Apache License Version 2.0. + * This product includes software developed at Datadog (https://www.datadoghq.com/). + * Copyright 2016-Present Datadog, Inc. + */ + +package com.datadog.android.core.internal.data.upload + +import com.datadog.android.core.configuration.UploadSchedulerStrategy +import com.datadog.android.core.internal.configuration.DataUploadConfiguration +import com.datadog.android.core.internal.data.upload.DataOkHttpUploader.Companion.HTTP_ACCEPTED +import java.io.IOException +import java.util.concurrent.ConcurrentHashMap +import java.util.concurrent.TimeUnit +import kotlin.math.max +import kotlin.math.min +import kotlin.math.roundToLong + +internal class DefaultUploadSchedulerStrategy( + internal val uploadConfiguration: DataUploadConfiguration +) : UploadSchedulerStrategy { + + private val currentDelays = ConcurrentHashMap<String, Long>() + + // region UploadSchedulerStrategy + + override fun getMsDelayUntilNextUpload( + featureName: String, + uploadAttempts: Int, + lastStatusCode: Int?, + throwable: Throwable? 
+ ): Long { + val previousDelay = currentDelays.getOrPut(featureName) { uploadConfiguration.defaultDelayMs } + val updatedDelay = if (uploadAttempts > 0 && throwable == null && lastStatusCode == HTTP_ACCEPTED) { + decreaseInterval(previousDelay) + } else { + increaseInterval(previousDelay, throwable) + } + currentDelays[featureName] = updatedDelay + return updatedDelay + } + + // endregion + + // region Internal + + private fun decreaseInterval(previousDelay: Long): Long { + @Suppress("UnsafeThirdPartyFunctionCall") // not a NaN + val newDelayMs = (previousDelay * DECREASE_PERCENT).roundToLong() + return max(uploadConfiguration.minDelayMs, newDelayMs) + } + + private fun increaseInterval(previousDelay: Long, throwable: Throwable?): Long { + @Suppress("UnsafeThirdPartyFunctionCall") // not a NaN + val newDelayMs = (previousDelay * INCREASE_PERCENT).roundToLong() + + return if (throwable is IOException) { + // An IOException can mean a DNS error, or network connection loss + // Those aren't likely to be a fluke or flakiness, so we use a longer delay to avoid infinite looping + // and prevent battery draining + NETWORK_ERROR_DELAY_MS + } else { + min(uploadConfiguration.maxDelayMs, newDelayMs) + } + } + + // endregion + + companion object { + internal const val DECREASE_PERCENT = 0.90 + internal const val INCREASE_PERCENT = 1.10 + internal val NETWORK_ERROR_DELAY_MS = TimeUnit.MINUTES.toMillis(1) // 1 minute delay + } +} diff --git a/dd-sdk-android-core/src/main/kotlin/com/datadog/android/core/internal/metrics/BatchMetricsDispatcher.kt b/dd-sdk-android-core/src/main/kotlin/com/datadog/android/core/internal/metrics/BatchMetricsDispatcher.kt index 74ea4f64a1..46ed6a066e 100644 --- a/dd-sdk-android-core/src/main/kotlin/com/datadog/android/core/internal/metrics/BatchMetricsDispatcher.kt +++ b/dd-sdk-android-core/src/main/kotlin/com/datadog/android/core/internal/metrics/BatchMetricsDispatcher.kt @@ -25,7 +25,7 @@ import java.util.concurrent.atomic.AtomicBoolean internal 
class BatchMetricsDispatcher( featureName: String, - private val uploadConfiguration: DataUploadConfiguration, + private val uploadConfiguration: DataUploadConfiguration?, private val filePersistenceConfig: FilePersistenceConfig, private val internalLogger: InternalLogger, private val dateTimeProvider: TimeProvider, @@ -104,8 +104,8 @@ internal class BatchMetricsDispatcher( TYPE_KEY to BATCH_DELETED_TYPE_VALUE, BATCH_AGE_KEY to fileAgeInMillis, UPLOADER_DELAY_KEY to mapOf( - UPLOADER_DELAY_MIN_KEY to uploadConfiguration.minDelayMs, - UPLOADER_DELAY_MAX_KEY to uploadConfiguration.maxDelayMs + UPLOADER_DELAY_MIN_KEY to uploadConfiguration?.minDelayMs, + UPLOADER_DELAY_MAX_KEY to uploadConfiguration?.maxDelayMs ), UPLOADER_WINDOW_KEY to filePersistenceConfig.recentDelayMs, diff --git a/dd-sdk-android-core/src/test/kotlin/com/datadog/android/core/internal/SdkFeatureTest.kt b/dd-sdk-android-core/src/test/kotlin/com/datadog/android/core/internal/SdkFeatureTest.kt index 68d7d1e0ee..d86f5d167b 100644 --- a/dd-sdk-android-core/src/test/kotlin/com/datadog/android/core/internal/SdkFeatureTest.kt +++ b/dd-sdk-android-core/src/test/kotlin/com/datadog/android/core/internal/SdkFeatureTest.kt @@ -24,6 +24,7 @@ import com.datadog.android.core.internal.configuration.DataUploadConfiguration import com.datadog.android.core.internal.data.upload.DataOkHttpUploader import com.datadog.android.core.internal.data.upload.DataUploadRunnable import com.datadog.android.core.internal.data.upload.DataUploadScheduler +import com.datadog.android.core.internal.data.upload.DefaultUploadSchedulerStrategy import com.datadog.android.core.internal.data.upload.NoOpDataUploader import com.datadog.android.core.internal.data.upload.NoOpUploadScheduler import com.datadog.android.core.internal.data.upload.UploadScheduler @@ -181,15 +182,11 @@ internal class SdkFeatureTest { testedFeature.initialize(appContext.mockInstance, fakeInstanceId) // Then - assertThat(testedFeature.uploadScheduler) - 
.isInstanceOf(DataUploadScheduler::class.java) + assertThat(testedFeature.uploadScheduler).isInstanceOf(DataUploadScheduler::class.java) val dataUploadRunnable = (testedFeature.uploadScheduler as DataUploadScheduler).runnable - assertThat(dataUploadRunnable.minDelayMs).isEqualTo(expectedUploadConfiguration.minDelayMs) - assertThat(dataUploadRunnable.maxDelayMs).isEqualTo(expectedUploadConfiguration.maxDelayMs) - assertThat(dataUploadRunnable.currentDelayIntervalMs) - .isEqualTo(expectedUploadConfiguration.defaultDelayMs) - assertThat(dataUploadRunnable.maxBatchesPerJob) - .isEqualTo(fakeCoreBatchProcessingLevel.maxBatchesPerUploadJob) + val uploadSchedulerStrategy = (dataUploadRunnable.uploadSchedulerStrategy as? DefaultUploadSchedulerStrategy) + assertThat(uploadSchedulerStrategy?.uploadConfiguration).isEqualTo(expectedUploadConfiguration) + assertThat(dataUploadRunnable.maxBatchesPerJob).isEqualTo(fakeCoreBatchProcessingLevel.maxBatchesPerUploadJob) argumentCaptor { verify(coreFeature.mockUploadExecutor).execute( argThat { this is DataUploadRunnable } diff --git a/dd-sdk-android-core/src/test/kotlin/com/datadog/android/core/internal/data/upload/DataUploadRunnableTest.kt b/dd-sdk-android-core/src/test/kotlin/com/datadog/android/core/internal/data/upload/DataUploadRunnableTest.kt index da7825db6c..4cea35d4c0 100644 --- a/dd-sdk-android-core/src/test/kotlin/com/datadog/android/core/internal/data/upload/DataUploadRunnableTest.kt +++ b/dd-sdk-android-core/src/test/kotlin/com/datadog/android/core/internal/data/upload/DataUploadRunnableTest.kt @@ -10,6 +10,7 @@ import com.datadog.android.api.InternalLogger import com.datadog.android.api.context.DatadogContext import com.datadog.android.api.context.NetworkInfo import com.datadog.android.api.storage.RawBatchEvent +import com.datadog.android.core.configuration.UploadSchedulerStrategy import com.datadog.android.core.internal.ContextProvider import com.datadog.android.core.internal.configuration.DataUploadConfiguration 
import com.datadog.android.core.internal.net.info.NetworkInfoProvider @@ -19,14 +20,13 @@ import com.datadog.android.core.internal.persistence.Storage import com.datadog.android.core.internal.system.SystemInfo import com.datadog.android.core.internal.system.SystemInfoProvider import com.datadog.android.utils.forge.Configurator -import com.datadog.tools.unit.forge.anException import fr.xgouchet.elmyr.Forge import fr.xgouchet.elmyr.annotation.Forgery import fr.xgouchet.elmyr.annotation.IntForgery +import fr.xgouchet.elmyr.annotation.LongForgery import fr.xgouchet.elmyr.annotation.StringForgery import fr.xgouchet.elmyr.junit5.ForgeConfiguration import fr.xgouchet.elmyr.junit5.ForgeExtension -import org.assertj.core.api.Assertions.assertThat import org.junit.jupiter.api.BeforeEach import org.junit.jupiter.api.Test import org.junit.jupiter.api.extension.ExtendWith @@ -38,13 +38,12 @@ import org.mockito.invocation.InvocationOnMock import org.mockito.junit.jupiter.MockitoExtension import org.mockito.junit.jupiter.MockitoSettings import org.mockito.kotlin.any -import org.mockito.kotlin.argumentCaptor +import org.mockito.kotlin.anyOrNull import org.mockito.kotlin.doAnswer import org.mockito.kotlin.doReturn import org.mockito.kotlin.eq import org.mockito.kotlin.mock import org.mockito.kotlin.reset -import org.mockito.kotlin.same import org.mockito.kotlin.times import org.mockito.kotlin.verify import org.mockito.kotlin.verifyNoInteractions @@ -84,31 +83,31 @@ internal class DataUploadRunnableTest { @Mock lateinit var mockInternalLogger: InternalLogger + @Mock + lateinit var mockUploadSchedulerStrategy: UploadSchedulerStrategy + @Forgery lateinit var fakeContext: DatadogContext - @Forgery - lateinit var fakeDataUploadConfiguration: DataUploadConfiguration + @IntForgery(min = 1, max = 4) + var fakeMaxBatchesPerJob: Int = 0 + + @LongForgery + var fakeDelayUntilNextUploadMs: Long = 0L @StringForgery lateinit var fakeFeatureName: String - private var expectedBatchesHandled: Int = 
0 - private lateinit var testedRunnable: DataUploadRunnable @BeforeEach fun `set up`(forge: Forge) { - // to make sure the existing tests based only on 1 batch are not broken - fakeDataUploadConfiguration = fakeDataUploadConfiguration.copy(maxBatchesPerUploadJob = 1) - val fakeNetworkInfo = - NetworkInfo( - forge.aValueFrom( - enumClass = NetworkInfo.Connectivity::class.java, - exclude = listOf(NetworkInfo.Connectivity.NETWORK_NOT_CONNECTED) - ) + val fakeNetworkInfo = NetworkInfo( + forge.aValueFrom( + enumClass = NetworkInfo.Connectivity::class.java, + exclude = listOf(NetworkInfo.Connectivity.NETWORK_NOT_CONNECTED) ) - expectedBatchesHandled = fakeDataUploadConfiguration.maxBatchesPerUploadJob + ) whenever(mockNetworkInfoProvider.getLatestNetworkInfo()) doReturn fakeNetworkInfo val fakeSystemInfo = SystemInfo( batteryFullOrCharging = true, @@ -117,6 +116,8 @@ internal class DataUploadRunnableTest { onExternalPowerSource = true ) whenever(mockSystemInfoProvider.getLatestSystemInfo()) doReturn fakeSystemInfo + whenever(mockUploadSchedulerStrategy.getMsDelayUntilNextUpload(any(), any(), anyOrNull(), anyOrNull())) + .doReturn(fakeDelayUntilNextUploadMs) whenever(mockContextProvider.context) doReturn fakeContext @@ -128,7 +129,8 @@ internal class DataUploadRunnableTest { mockContextProvider, mockNetworkInfoProvider, mockSystemInfoProvider, - fakeDataUploadConfiguration, + mockUploadSchedulerStrategy, + fakeMaxBatchesPerJob, mockInternalLogger ) } @@ -146,11 +148,8 @@ internal class DataUploadRunnableTest { // Then verifyNoInteractions(mockDataUploader, mockStorage) - verify(mockThreadPoolExecutor).schedule( - same(testedRunnable), - any(), - eq(TimeUnit.MILLISECONDS) - ) + verify(mockUploadSchedulerStrategy).getMsDelayUntilNextUpload(fakeFeatureName, 0, null, null) + verify(mockThreadPoolExecutor).schedule(testedRunnable, fakeDelayUntilNextUploadMs, TimeUnit.MILLISECONDS) } @Test @@ -174,29 +173,32 @@ internal class DataUploadRunnableTest { 
whenever(mockStorage.readNextBatch()).thenReturn(BatchData(batchId, batch, batchMetadata)) whenever(mockSystemInfoProvider.getLatestSystemInfo()) doReturn fakeSystemInfo + val fakeUploadStatus = forge.getForgery(UploadStatus.Success::class.java) whenever( mockDataUploader.upload( fakeContext, batch, batchMetadata ) - ) doReturn forge.getForgery(UploadStatus.Success::class.java) + ) doReturn fakeUploadStatus // When testedRunnable.run() // Then - verify(mockStorage, times(expectedBatchesHandled)).confirmBatchRead( + verify(mockStorage, times(fakeMaxBatchesPerJob)).confirmBatchRead( eq(batchId), any(), eq(true) ) - verify(mockDataUploader, times(expectedBatchesHandled)).upload(fakeContext, batch, batchMetadata) - verify(mockThreadPoolExecutor).schedule( - same(testedRunnable), - any(), - eq(TimeUnit.MILLISECONDS) + verify(mockDataUploader, times(fakeMaxBatchesPerJob)).upload(fakeContext, batch, batchMetadata) + verify(mockUploadSchedulerStrategy).getMsDelayUntilNextUpload( + fakeFeatureName, + fakeMaxBatchesPerJob, + fakeUploadStatus.code, + null ) + verify(mockThreadPoolExecutor).schedule(testedRunnable, fakeDelayUntilNextUploadMs, TimeUnit.MILLISECONDS) } @Test @@ -219,30 +221,32 @@ internal class DataUploadRunnableTest { whenever(mockStorage.readNextBatch()).thenReturn(BatchData(batchId, batch, batchMetadata)) whenever(mockSystemInfoProvider.getLatestSystemInfo()) doReturn fakeSystemInfo + val fakeUploadStatus = forge.getForgery(UploadStatus.Success::class.java) whenever( mockDataUploader.upload( fakeContext, batch, batchMetadata ) - ) doReturn forge.getForgery(UploadStatus.Success::class.java) + ) doReturn fakeUploadStatus // When testedRunnable.run() // Then - verify(mockStorage, times(expectedBatchesHandled)).confirmBatchRead( + verify(mockStorage, times(fakeMaxBatchesPerJob)).confirmBatchRead( eq(batchId), any(), eq(true) ) - verify(mockDataUploader, times(expectedBatchesHandled)) - .upload(fakeContext, batch, batchMetadata) - 
verify(mockThreadPoolExecutor).schedule( - same(testedRunnable), - any(), - eq(TimeUnit.MILLISECONDS) + verify(mockDataUploader, times(fakeMaxBatchesPerJob)).upload(fakeContext, batch, batchMetadata) + verify(mockUploadSchedulerStrategy).getMsDelayUntilNextUpload( + fakeFeatureName, + fakeMaxBatchesPerJob, + fakeUploadStatus.code, + null ) + verify(mockThreadPoolExecutor).schedule(testedRunnable, fakeDelayUntilNextUploadMs, TimeUnit.MILLISECONDS) } @Test @@ -264,30 +268,32 @@ internal class DataUploadRunnableTest { whenever(mockStorage.readNextBatch()).thenReturn(BatchData(batchId, batch, batchMetadata)) whenever(mockSystemInfoProvider.getLatestSystemInfo()) doReturn fakeSystemInfo + val fakeUploadStatus = forge.getForgery(UploadStatus.Success::class.java) whenever( mockDataUploader.upload( fakeContext, batch, batchMetadata ) - ) doReturn forge.getForgery(UploadStatus.Success::class.java) + ) doReturn fakeUploadStatus // When testedRunnable.run() // Then - verify(mockStorage, times(expectedBatchesHandled)).confirmBatchRead( + verify(mockStorage, times(fakeMaxBatchesPerJob)).confirmBatchRead( eq(batchId), any(), eq(true) ) - verify(mockDataUploader, times(expectedBatchesHandled)) - .upload(fakeContext, batch, batchMetadata) - verify(mockThreadPoolExecutor).schedule( - same(testedRunnable), - any(), - eq(TimeUnit.MILLISECONDS) + verify(mockDataUploader, times(fakeMaxBatchesPerJob)).upload(fakeContext, batch, batchMetadata) + verify(mockUploadSchedulerStrategy).getMsDelayUntilNextUpload( + fakeFeatureName, + fakeMaxBatchesPerJob, + fakeUploadStatus.code, + null ) + verify(mockThreadPoolExecutor).schedule(testedRunnable, fakeDelayUntilNextUploadMs, TimeUnit.MILLISECONDS) } @Test @@ -308,11 +314,8 @@ internal class DataUploadRunnableTest { // Then verifyNoInteractions(mockStorage, mockDataUploader) - verify(mockThreadPoolExecutor).schedule( - same(testedRunnable), - any(), - eq(TimeUnit.MILLISECONDS) - ) + 
verify(mockUploadSchedulerStrategy).getMsDelayUntilNextUpload(fakeFeatureName, 0, null, null) + verify(mockThreadPoolExecutor).schedule(testedRunnable, fakeDelayUntilNextUploadMs, TimeUnit.MILLISECONDS) } @Test @@ -333,11 +336,8 @@ internal class DataUploadRunnableTest { // Then verifyNoInteractions(mockStorage, mockDataUploader) - verify(mockThreadPoolExecutor).schedule( - same(testedRunnable), - any(), - eq(TimeUnit.MILLISECONDS) - ) + verify(mockUploadSchedulerStrategy).getMsDelayUntilNextUpload(fakeFeatureName, 0, null, null) + verify(mockThreadPoolExecutor).schedule(testedRunnable, fakeDelayUntilNextUploadMs, TimeUnit.MILLISECONDS) } @Test @@ -358,11 +358,8 @@ internal class DataUploadRunnableTest { // Then verifyNoInteractions(mockStorage, mockDataUploader) - verify(mockThreadPoolExecutor).schedule( - same(testedRunnable), - any(), - eq(TimeUnit.MILLISECONDS) - ) + verify(mockUploadSchedulerStrategy).getMsDelayUntilNextUpload(fakeFeatureName, 0, null, null) + verify(mockThreadPoolExecutor).schedule(testedRunnable, fakeDelayUntilNextUploadMs, TimeUnit.MILLISECONDS) } @Test @@ -383,11 +380,8 @@ internal class DataUploadRunnableTest { // Then verifyNoInteractions(mockStorage, mockDataUploader) - verify(mockThreadPoolExecutor).schedule( - same(testedRunnable), - any(), - eq(TimeUnit.MILLISECONDS) - ) + verify(mockUploadSchedulerStrategy).getMsDelayUntilNextUpload(fakeFeatureName, 0, null, null) + verify(mockThreadPoolExecutor).schedule(testedRunnable, fakeDelayUntilNextUploadMs, TimeUnit.MILLISECONDS) } @Test @@ -402,11 +396,8 @@ internal class DataUploadRunnableTest { verify(mockStorage).readNextBatch() verifyNoMoreInteractions(mockStorage) verifyNoInteractions(mockDataUploader) - verify(mockThreadPoolExecutor).schedule( - eq(testedRunnable), - any(), - eq(TimeUnit.MILLISECONDS) - ) + verify(mockUploadSchedulerStrategy).getMsDelayUntilNextUpload(fakeFeatureName, 0, null, null) + verify(mockThreadPoolExecutor).schedule(testedRunnable, fakeDelayUntilNextUploadMs, 
TimeUnit.MILLISECONDS) } @Test @@ -420,30 +411,29 @@ internal class DataUploadRunnableTest { val batchMetadata = forge.aNullable { batchMeta.toByteArray() } whenever(mockStorage.readNextBatch()).thenReturn(BatchData(batchId, batch, batchMetadata)) + val fakeUploadStatus = forge.getForgery(UploadStatus.Success::class.java) whenever( mockDataUploader.upload( fakeContext, batch, batchMetadata ) - ) doReturn forge.getForgery(UploadStatus.Success::class.java) + ) doReturn fakeUploadStatus // When testedRunnable.run() // Then - verify(mockStorage, times(expectedBatchesHandled)).confirmBatchRead( - any(), - any(), - eq(true) - ) - verify(mockDataUploader, times(expectedBatchesHandled)) - .upload(fakeContext, batch, batchMetadata) - verify(mockThreadPoolExecutor).schedule( - same(testedRunnable), - any(), - eq(TimeUnit.MILLISECONDS) + verify(mockStorage, times(fakeMaxBatchesPerJob)).confirmBatchRead(any(), any(), eq(true)) + verify(mockDataUploader, times(fakeMaxBatchesPerJob)).upload(fakeContext, batch, batchMetadata) + verify(mockUploadSchedulerStrategy).getMsDelayUntilNextUpload( + fakeFeatureName, + fakeMaxBatchesPerJob, + fakeUploadStatus.code, + null ) + verify(mockThreadPoolExecutor).remove(testedRunnable) + verify(mockThreadPoolExecutor).schedule(testedRunnable, fakeDelayUntilNextUploadMs, TimeUnit.MILLISECONDS) } @ParameterizedTest @@ -471,18 +461,15 @@ internal class DataUploadRunnableTest { testedRunnable.run() // Then - verify(mockStorage, times(expectedBatchesHandled)).confirmBatchRead( - eq(batchId), - any(), - eq(false) - ) - verify(mockDataUploader, times(expectedBatchesHandled)) - .upload(fakeContext, batch, batchMetadata) - verify(mockThreadPoolExecutor).schedule( - same(testedRunnable), - any(), - eq(TimeUnit.MILLISECONDS) + verify(mockStorage).confirmBatchRead(eq(batchId), any(), eq(false)) + verify(mockDataUploader).upload(fakeContext, batch, batchMetadata) + verify(mockUploadSchedulerStrategy).getMsDelayUntilNextUpload( + fakeFeatureName, + 1, + 
uploadStatus.code, + uploadStatus.throwable ) + verify(mockThreadPoolExecutor).schedule(testedRunnable, fakeDelayUntilNextUploadMs, TimeUnit.MILLISECONDS) } @ParameterizedTest @@ -513,18 +500,16 @@ internal class DataUploadRunnableTest { } // Then - verify(mockStorage, times(runCount)).confirmBatchRead( - eq(batchId), - any(), - eq(false) - ) - verify(mockDataUploader, times(runCount * expectedBatchesHandled)) - .upload(fakeContext, batch, batchMetadata) - verify(mockThreadPoolExecutor, times(runCount)).schedule( - same(testedRunnable), - any(), - eq(TimeUnit.MILLISECONDS) + verify(mockStorage, times(runCount)).confirmBatchRead(eq(batchId), any(), eq(false)) + verify(mockDataUploader, times(runCount)).upload(fakeContext, batch, batchMetadata) + verify(mockUploadSchedulerStrategy, times(runCount)).getMsDelayUntilNextUpload( + fakeFeatureName, + 1, + uploadStatus.code, + uploadStatus.throwable ) + verify(mockThreadPoolExecutor, times(runCount)) + .schedule(testedRunnable, fakeDelayUntilNextUploadMs, TimeUnit.MILLISECONDS) } @ParameterizedTest @@ -552,326 +537,15 @@ internal class DataUploadRunnableTest { testedRunnable.run() // Then - verify(mockStorage, times(expectedBatchesHandled)).confirmBatchRead( - eq(batchId), - any(), - eq(true) - ) - verify(mockDataUploader, times(expectedBatchesHandled)) - .upload(fakeContext, batch, batchMetadata) - verify(mockThreadPoolExecutor).schedule( - same(testedRunnable), - any(), - eq(TimeUnit.MILLISECONDS) - ) - } - - @Test - fun `when has batches the upload frequency will increase`( - @Forgery batch: List, - @StringForgery batchMeta: String, - forge: Forge - ) { - // Given - val batchId = mock() - val batchMetadata = forge.aNullable { batchMeta.toByteArray() } - whenever(mockStorage.readNextBatch()).thenReturn(BatchData(batchId, batch, batchMetadata)) - whenever( - mockDataUploader.upload( - fakeContext, - batch, - batchMetadata - ) - ) doReturn forge.getForgery(UploadStatus.Success::class.java) - - // When - repeat(5) { - 
testedRunnable.run() - } - - // Then - val captor = argumentCaptor() - verify( - mockThreadPoolExecutor, - times(5 * expectedBatchesHandled) - ) - .schedule(same(testedRunnable), captor.capture(), eq(TimeUnit.MILLISECONDS)) - captor.allValues.reduce { previous, next -> - assertThat(next).isLessThan(previous) - next - } - } - - @Test - fun `M reduce delay between runs W upload is successful`( - @Forgery batch: List, - @StringForgery batchMeta: String, - @IntForgery(16, 64) runCount: Int, - forge: Forge - ) { - // Given - val batchId = mock() - val batchMetadata = forge.aNullable { batchMeta.toByteArray() } - - whenever(mockStorage.readNextBatch()).thenReturn(BatchData(batchId, batch, batchMetadata)) - whenever( - mockDataUploader.upload( - fakeContext, - batch, - batchMetadata - ) - ) doReturn forge.getForgery(UploadStatus.Success::class.java) - - // When - repeat(runCount) { - testedRunnable.run() - } - - // Then - argumentCaptor { - verify(mockThreadPoolExecutor, times(runCount)) - .schedule( - same(testedRunnable), - capture(), - eq(TimeUnit.MILLISECONDS) - ) - - allValues.reduce { previous, next -> - assertThat(next) - .isLessThanOrEqualTo(previous) - .isBetween(testedRunnable.minDelayMs, testedRunnable.maxDelayMs) - next - } - } - } - - @ParameterizedTest - @MethodSource("dropBatchStatusValues") - fun `M reduce delay between runs W batch fails and should be dropped`( - uploadStatus: UploadStatus, - @IntForgery(16, 64) runCount: Int, - forge: Forge, - @Forgery fakeConfiguration: DataUploadConfiguration - ) { - // Given - testedRunnable = DataUploadRunnable( + verify(mockStorage).confirmBatchRead(eq(batchId), any(), eq(true)) + verify(mockDataUploader).upload(fakeContext, batch, batchMetadata) + verify(mockUploadSchedulerStrategy).getMsDelayUntilNextUpload( fakeFeatureName, - mockThreadPoolExecutor, - mockStorage, - mockDataUploader, - mockContextProvider, - mockNetworkInfoProvider, - mockSystemInfoProvider, - fakeConfiguration, - mockInternalLogger + 1, + 
uploadStatus.code, + uploadStatus.throwable ) - // extra batches to make sure we are not reaching the limit as this will fall into the - // else branch and increase the interval making the test to fail - val batches = forge.aList(size = runCount * fakeConfiguration.maxBatchesPerUploadJob + 10) { - aList { getForgery() } - } - val batchIds: List = batches.map { mock() } - val randomFailIndex = forge.anInt(min = 0, max = batches.size) - val batchMetadata = forge.aList(size = batches.size) { - aNullable { aString().toByteArray() } - } - - stubStorage(batchIds, batches, batchMetadata) - - batches.forEachIndexed { index, batch -> - val expectedStatus = if (index == randomFailIndex) { - uploadStatus - } else { - forge.getForgery(UploadStatus.Success::class.java) - } - whenever( - mockDataUploader.upload( - fakeContext, - batch, - batchMetadata[index] - ) - ) doReturn expectedStatus - } - - // When - repeat(runCount) { - testedRunnable.run() - } - - // Then - argumentCaptor { - verify(mockThreadPoolExecutor, times(runCount)) - .schedule( - same(testedRunnable), - capture(), - eq(TimeUnit.MILLISECONDS) - ) - - allValues.reduce { previous, next -> - assertThat(next) - .isLessThanOrEqualTo(previous) - .isBetween(testedRunnable.minDelayMs, testedRunnable.maxDelayMs) - next - } - } - } - - @Test - fun `M increase delay between runs W no batch available`( - @IntForgery(16, 64) runCount: Int - ) { - // When - whenever(mockStorage.readNextBatch()) doReturn null - repeat(runCount) { - testedRunnable.run() - } - - // Then - argumentCaptor { - verify(mockThreadPoolExecutor, times(runCount)) - .schedule( - same(testedRunnable), - capture(), - eq(TimeUnit.MILLISECONDS) - ) - - allValues.reduce { previous, next -> - assertThat(next) - .isGreaterThanOrEqualTo(previous) - .isBetween(testedRunnable.minDelayMs, testedRunnable.maxDelayMs) - next - } - } - } - - @ParameterizedTest - @MethodSource("retryBatchStatusValues") - fun `M increase delay between runs W batch fails and should be 
retried`( - status: UploadStatus, - @IntForgery(1, 10) runCount: Int, - forge: Forge, - @Forgery fakeConfiguration: DataUploadConfiguration - ) { - // Given - testedRunnable = DataUploadRunnable( - fakeFeatureName, - mockThreadPoolExecutor, - mockStorage, - mockDataUploader, - mockContextProvider, - mockNetworkInfoProvider, - mockSystemInfoProvider, - fakeConfiguration, - mockInternalLogger - ) - val batches = forge.aList(size = runCount * fakeConfiguration.maxBatchesPerUploadJob) { - aList { getForgery() } - } - val batchIds: List = batches.map { mock() } - val failIndexesSet = mutableSetOf().apply { - var index = 0 - repeat(runCount) { - add(index) - index += fakeConfiguration.maxBatchesPerUploadJob - } - } - val batchMetadata = forge.aList(size = batches.size) { - aNullable { aString().toByteArray() } - } - - stubStorage(batchIds, batches, batchMetadata) - batches.forEachIndexed { index, batch -> - val expectedStatus = if (index in failIndexesSet) { - status - } else { - forge.getForgery(UploadStatus.Success::class.java) - } - whenever( - mockDataUploader.upload( - fakeContext, - batch, - batchMetadata[index] - ) - ) doReturn expectedStatus - } - - // When - repeat(runCount) { - testedRunnable.run() - } - - // Then - argumentCaptor { - verify(mockThreadPoolExecutor, times(runCount)) - .schedule( - same(testedRunnable), - capture(), - eq(TimeUnit.MILLISECONDS) - ) - - allValues.reduce { previous, next -> - assertThat(next) - .isGreaterThanOrEqualTo(previous) - .isBetween(testedRunnable.minDelayMs, testedRunnable.maxDelayMs) - next - } - } - } - - @Test - fun `M increase delay between runs W batch fails because of DNS`( - @IntForgery(1, 10) runCount: Int, - forge: Forge, - @Forgery fakeConfiguration: DataUploadConfiguration - ) { - // Given - testedRunnable = DataUploadRunnable( - fakeFeatureName, - mockThreadPoolExecutor, - mockStorage, - mockDataUploader, - mockContextProvider, - mockNetworkInfoProvider, - mockSystemInfoProvider, - fakeConfiguration, - 
mockInternalLogger - ) - val batches = forge.aList(size = runCount * fakeConfiguration.maxBatchesPerUploadJob) { - aList { getForgery() } - } - val batchIds: List = batches.map { mock() } - val batchMetadata = forge.aList(size = batches.size) { - aNullable { aString().toByteArray() } - } - stubStorage(batchIds, batches, batchMetadata) - batches.forEachIndexed { index, batch -> - whenever( - mockDataUploader.upload( - fakeContext, - batch, - batchMetadata[index] - ) - ) doReturn UploadStatus.DNSError(forge.anException()) - } - - // When - repeat(runCount) { - testedRunnable.run() - } - - // Then - argumentCaptor { - verify(mockThreadPoolExecutor, times(runCount)) - .schedule( - same(testedRunnable), - capture(), - eq(TimeUnit.MILLISECONDS) - ) - - allValues.forEach { delay -> - assertThat(delay).isEqualTo(testedRunnable.maxDelayMs * DataUploadRunnable.DNS_DELAY_MULTIPLIER) - } - } + verify(mockThreadPoolExecutor).schedule(testedRunnable, fakeDelayUntilNextUploadMs, TimeUnit.MILLISECONDS) } // region maxBatchesPerJob @@ -890,7 +564,8 @@ internal class DataUploadRunnableTest { mockContextProvider, mockNetworkInfoProvider, mockSystemInfoProvider, - fakeConfiguration, + mockUploadSchedulerStrategy, + fakeMaxBatchesPerJob, mockInternalLogger ) val batches = forge.aList( @@ -918,7 +593,8 @@ internal class DataUploadRunnableTest { testedRunnable.run() // Then - batches.take(fakeConfiguration.maxBatchesPerUploadJob).forEachIndexed { index, batch -> + repeat(fakeMaxBatchesPerJob) { index -> + val batch = batches[index] verify(mockDataUploader).upload(fakeContext, batch, batchMetadata[index]) verify(mockStorage).confirmBatchRead( eq(batchIds[index]), @@ -927,11 +603,7 @@ internal class DataUploadRunnableTest { ) } verifyNoMoreInteractions(mockDataUploader) - verify(mockThreadPoolExecutor).schedule( - same(testedRunnable), - any(), - eq(TimeUnit.MILLISECONDS) - ) + verify(mockThreadPoolExecutor).schedule(testedRunnable, fakeDelayUntilNextUploadMs, TimeUnit.MILLISECONDS) } 
@Test @@ -948,19 +620,20 @@ internal class DataUploadRunnableTest { mockContextProvider, mockNetworkInfoProvider, mockSystemInfoProvider, - fakeConfiguration, + mockUploadSchedulerStrategy, + fakeMaxBatchesPerJob, mockInternalLogger ) val fakeBatchesCount = forge.anInt( min = 1, - max = fakeConfiguration.maxBatchesPerUploadJob + 1 + max = fakeConfiguration.maxBatchesPerUploadJob + 4 ) - val batches = forge.aList( - size = fakeBatchesCount - ) { aList { getForgery() } } + val batches = forge.aList(size = fakeBatchesCount) { aList { getForgery() } } val batchIds: List = batches.map { mock() } val batchMetadata = forge.aList(size = batches.size) { aNullable { aString().toByteArray() } } stubStorage(batchIds, batches, batchMetadata) + val fakeUploadStatus = forge.getForgery(UploadStatus.Success::class.java) + batches.forEachIndexed { index, batch -> whenever( mockDataUploader.upload( @@ -968,14 +641,15 @@ internal class DataUploadRunnableTest { batch, batchMetadata[index] ) - ) doReturn forge.getForgery(UploadStatus.Success::class.java) + ) doReturn fakeUploadStatus } // When testedRunnable.run() // Then - batches.forEachIndexed { index, batch -> + repeat(fakeMaxBatchesPerJob) { index -> + val batch = batches[index] verify(mockDataUploader).upload(fakeContext, batch, batchMetadata[index]) verify(mockStorage).confirmBatchRead( eq(batchIds[index]), @@ -984,11 +658,7 @@ internal class DataUploadRunnableTest { ) } verifyNoMoreInteractions(mockDataUploader) - verify(mockThreadPoolExecutor).schedule( - same(testedRunnable), - any(), - eq(TimeUnit.MILLISECONDS) - ) + verify(mockThreadPoolExecutor).schedule(testedRunnable, fakeDelayUntilNextUploadMs, TimeUnit.MILLISECONDS) } // region Internal @@ -1000,16 +670,16 @@ internal class DataUploadRunnableTest { ) { reset(mockStorage) whenever(mockStorage.readNextBatch()) doAnswer object : Answer { - var count = 0 + var index = 0 override fun answer(invocation: InvocationOnMock): BatchData? 
{ - val data = if (count >= batches.size) { + val data = if (index >= batches.size) { null } else { - val batchData = BatchData(batchIds[count], batches[count], batchMeta[count]) + val batchData = BatchData(batchIds[index], batches[index], batchMeta[index]) batchData } - count++ + index++ return data } } diff --git a/dd-sdk-android-core/src/test/kotlin/com/datadog/android/core/internal/data/upload/DataUploadSchedulerTest.kt b/dd-sdk-android-core/src/test/kotlin/com/datadog/android/core/internal/data/upload/DataUploadSchedulerTest.kt index 7cd6913f7a..e66fa553cb 100644 --- a/dd-sdk-android-core/src/test/kotlin/com/datadog/android/core/internal/data/upload/DataUploadSchedulerTest.kt +++ b/dd-sdk-android-core/src/test/kotlin/com/datadog/android/core/internal/data/upload/DataUploadSchedulerTest.kt @@ -6,9 +6,11 @@ package com.datadog.android.core.internal.data.upload +import com.datadog.android.core.configuration.UploadSchedulerStrategy import com.datadog.android.core.internal.configuration.DataUploadConfiguration import com.datadog.android.utils.forge.Configurator import fr.xgouchet.elmyr.annotation.Forgery +import fr.xgouchet.elmyr.annotation.IntForgery import fr.xgouchet.elmyr.annotation.StringForgery import fr.xgouchet.elmyr.junit5.ForgeConfiguration import fr.xgouchet.elmyr.junit5.ForgeExtension @@ -45,17 +47,24 @@ internal class DataUploadSchedulerTest { @StringForgery lateinit var fakeFeatureName: String + @Mock + lateinit var mockUploadSchedulerStrategy: UploadSchedulerStrategy + + @IntForgery(min = 1, max = 4) + var fakeMaxBatchesPerJob: Int = 0 + @BeforeEach fun `set up`() { testedScheduler = DataUploadScheduler( - fakeFeatureName, + featureName = fakeFeatureName, storage = mock(), dataUploader = mock(), contextProvider = mock(), networkInfoProvider = mock(), systemInfoProvider = mock(), - fakeUploadConfiguration, - mockExecutor, + uploadSchedulerStrategy = mockUploadSchedulerStrategy, + maxBatchesPerJob = fakeMaxBatchesPerJob, + scheduledThreadPoolExecutor = 
mockExecutor, internalLogger = mock() ) } diff --git a/dd-sdk-android-core/src/test/kotlin/com/datadog/android/core/internal/data/upload/DefaultUploadSchedulerStrategyTest.kt b/dd-sdk-android-core/src/test/kotlin/com/datadog/android/core/internal/data/upload/DefaultUploadSchedulerStrategyTest.kt new file mode 100644 index 0000000000..22a66d1766 --- /dev/null +++ b/dd-sdk-android-core/src/test/kotlin/com/datadog/android/core/internal/data/upload/DefaultUploadSchedulerStrategyTest.kt @@ -0,0 +1,128 @@ +package com.datadog.android.core.internal.data.upload + +import com.datadog.android.core.configuration.UploadSchedulerStrategy +import com.datadog.android.core.internal.configuration.DataUploadConfiguration +import com.datadog.android.utils.forge.Configurator +import fr.xgouchet.elmyr.annotation.Forgery +import fr.xgouchet.elmyr.annotation.IntForgery +import fr.xgouchet.elmyr.annotation.StringForgery +import fr.xgouchet.elmyr.junit5.ForgeConfiguration +import fr.xgouchet.elmyr.junit5.ForgeExtension +import org.assertj.core.api.Assertions.assertThat +import org.junit.jupiter.api.BeforeEach +import org.junit.jupiter.api.Test +import org.junit.jupiter.api.extension.ExtendWith +import org.junit.jupiter.api.extension.Extensions +import org.mockito.junit.jupiter.MockitoExtension +import org.mockito.junit.jupiter.MockitoSettings +import org.mockito.quality.Strictness +import java.io.IOException + +@Extensions( + ExtendWith(MockitoExtension::class), + ExtendWith(ForgeExtension::class) +) +@MockitoSettings(strictness = Strictness.LENIENT) +@ForgeConfiguration(Configurator::class) +internal class DefaultUploadSchedulerStrategyTest { + + lateinit var testedStrategy: UploadSchedulerStrategy + + @Forgery + lateinit var fakeConfiguration: DataUploadConfiguration + + @StringForgery + lateinit var fakeFeatureName: String + + var initialDelay = 0L + + @BeforeEach + fun `set up`() { + testedStrategy = DefaultUploadSchedulerStrategy(fakeConfiguration) + initialDelay = 
testedStrategy.getMsDelayUntilNextUpload(fakeFeatureName, 0, null, null) + } + + @Test + fun `M decrease delay W getMsDelayUntilNextUpload() {successful attempt}`( + @IntForgery(1, 128) repeats: Int, + @IntForgery(1, 64) attempts: Int + ) { + // Given + var delay = 0L + + // When + repeat(repeats) { delay = testedStrategy.getMsDelayUntilNextUpload(fakeFeatureName, attempts, 202, null) } + + // Then + assertThat(delay).isLessThan(initialDelay) + assertThat(delay).isGreaterThanOrEqualTo(fakeConfiguration.minDelayMs) + } + + @Test + fun `M increase delay W getMsDelayUntilNextUpload() {no attempt made}`( + @IntForgery(1, 128) repeats: Int + ) { + // Given + var delay = 0L + + // When + repeat(repeats) { delay = testedStrategy.getMsDelayUntilNextUpload(fakeFeatureName, 0, null, null) } + + // Then + assertThat(delay).isGreaterThan(initialDelay) + assertThat(delay).isLessThanOrEqualTo(fakeConfiguration.maxDelayMs) + } + + @Test + fun `M increase delay W getMsDelayUntilNextUpload() {invalid status code}`( + @IntForgery(1, 128) repeats: Int, + @IntForgery(1, 64) attempts: Int, + @IntForgery(300, 600) statusCode: Int + ) { + // Given + var delay = 0L + + // When + repeat(repeats) { + delay = testedStrategy.getMsDelayUntilNextUpload(fakeFeatureName, attempts, statusCode, null) + } + + // Then + assertThat(delay).isGreaterThan(initialDelay) + assertThat(delay).isLessThanOrEqualTo(fakeConfiguration.maxDelayMs) + } + + @Test + fun `M increase delay W getMsDelayUntilNextUpload() {non IOException}`( + @IntForgery(1, 128) repeats: Int, + @IntForgery(1, 64) attempts: Int, + @Forgery exception: Exception + ) { + // Given + var delay = 0L + + // When + repeat(repeats) { delay = testedStrategy.getMsDelayUntilNextUpload(fakeFeatureName, attempts, null, exception) } + + // Then + assertThat(delay).isGreaterThan(initialDelay) + assertThat(delay).isLessThanOrEqualTo(fakeConfiguration.maxDelayMs) + } + + @Test + fun `M increase delay to high value W getMsDelayUntilNextUpload() 
{IOException}`( + @IntForgery(1, 128) repeats: Int, + @IntForgery(1, 64) attempts: Int, + @StringForgery message: String + ) { + // Given + var delay = 0L + val exception = IOException(message) + + // When + repeat(repeats) { delay = testedStrategy.getMsDelayUntilNextUpload(fakeFeatureName, attempts, null, exception) } + + // Then + assertThat(delay).isEqualTo(DefaultUploadSchedulerStrategy.NETWORK_ERROR_DELAY_MS) + } +} diff --git a/detekt_custom.yml b/detekt_custom.yml index 9ffcaa3b96..83eed153d8 100644 --- a/detekt_custom.yml +++ b/detekt_custom.yml @@ -612,6 +612,7 @@ datadog: - "java.util.concurrent.ConcurrentLinkedQueue.isNotEmpty()" - "java.util.concurrent.ConcurrentLinkedQueue.peek()" - "java.util.concurrent.ConcurrentLinkedQueue.poll()" + - "java.util.concurrent.ConcurrentHashMap.getOrPut(kotlin.String?, kotlin.Function0)" - "java.util.concurrent.CopyOnWriteArraySet.add(kotlin.String?)" - "java.util.concurrent.CopyOnWriteArraySet.constructor()" - "java.util.concurrent.CopyOnWriteArraySet.remove(kotlin.String?)" @@ -928,6 +929,7 @@ datadog: - "kotlin.collections.MutableMap.filterValues(kotlin.Function1)" - "kotlin.collections.MutableMap.forEach(kotlin.Function1)" - "kotlin.collections.MutableMap.get(kotlin.String)" + - "kotlin.collections.MutableMap.getOrPut(kotlin.String, kotlin.Function0)" - "kotlin.collections.MutableMap.isEmpty()" - "kotlin.collections.MutableMap.isNotEmpty()" - "kotlin.collections.MutableMap.iterator()" @@ -1073,6 +1075,7 @@ datadog: - "kotlin.math.max(kotlin.Int, kotlin.Int)" - "kotlin.math.max(kotlin.Long, kotlin.Long)" - "kotlin.math.min(kotlin.Double, kotlin.Double)" + - "kotlin.math.min(kotlin.Long, kotlin.Long)" - "kotlin.math.sqrt(kotlin.Double)" - "kotlin.String.trim(kotlin.Function1)" # endregion