From 2438d6846faef674e9262ab0af3910021cb233b1 Mon Sep 17 00:00:00 2001 From: Lavkesh Lahngir Date: Wed, 18 May 2022 12:31:40 +0800 Subject: [PATCH 1/9] Bq sink connector using depot repo. (#168) * feat: adding odpf/depot dependencies * feat: add github maven repo for odpf/depot * feat: add github auth * chore: dependencies * chore: version bump * chore: version bump * refactor: move common code to separate class --- build.gradle | 27 +- docs/docs/concepts/filters.md | 4 +- .../io/odpf/firehose/config/ErrorConfig.java | 2 +- .../converter/SetErrorTypeConverter.java | 2 +- .../consumer/FirehoseAsyncConsumer.java | 12 +- .../consumer/FirehoseConsumerFactory.java | 46 +- .../firehose/consumer/FirehoseFilter.java | 8 +- .../consumer/FirehoseSyncConsumer.java | 10 +- .../kafka/ConsumerAndOffsetManager.java | 10 +- .../consumer/kafka/FirehoseKafkaConsumer.java | 36 +- .../io/odpf/firehose/error/ErrorHandler.java | 1 + .../io/odpf/firehose/error/ErrorInfo.java | 15 - .../io/odpf/firehose/error/ErrorType.java | 11 - .../io/odpf/firehose/filter/NoOpFilter.java | 6 +- .../odpf/firehose/filter/jexl/JexlFilter.java | 12 +- .../odpf/firehose/filter/json/JsonFilter.java | 16 +- .../firehose/filter/json/JsonFilterUtil.java | 24 +- .../java/io/odpf/firehose/launch/Main.java | 39 +- .../java/io/odpf/firehose/launch/Task.java | 24 +- .../message/FirehoseMessageUtils.java | 24 ++ .../io/odpf/firehose/message/Message.java | 4 +- .../metrics/FirehoseInstrumentation.java | 159 +++++++ .../firehose/metrics/Instrumentation.java | 259 ----------- .../odpf/firehose/metrics/StatsDReporter.java | 91 ---- .../metrics/StatsDReporterFactory.java | 61 --- .../serializer/MessageToTemplatizedJson.java | 14 +- .../io/odpf/firehose/sink/AbstractSink.java | 38 +- .../odpf/firehose/sink/GenericOdpfSink.java | 48 +++ .../io/odpf/firehose/sink/SinkFactory.java | 37 +- .../odpf/firehose/sink/SinkFactoryUtils.java | 19 + .../firehose/sink/bigquery/BigQuerySink.java | 74 ---- .../sink/bigquery/BigQuerySinkFactory.java | 71 --- .../sink/bigquery/BigquerySinkUtils.java | 15 + .../converter/MessageRecordConverter.java | 94 ---- .../MessageRecordConverterCache.java | 8 - .../bigquery/converter/ProtoFieldFactory.java | 33 -- .../sink/bigquery/converter/RowMapper.java | 93 ---- .../converter/fields/ByteProtoField.java | 31 -- .../converter/fields/DefaultProtoField.java | 18 - .../converter/fields/EnumProtoField.java | 31 -- .../converter/fields/NestedProtoField.java | 23 - .../bigquery/converter/fields/ProtoField.java | 8 - .../converter/fields/StructProtoField.java | 45 -- .../converter/fields/TimestampProtoField.java | 45 -- .../sink/bigquery/error/ErrorDescriptor.java | 15 - .../sink/bigquery/error/ErrorParser.java | 37 -- .../bigquery/error/InvalidSchemaError.java | 27 -- .../sink/bigquery/error/OOBError.java | 26 -- .../sink/bigquery/error/StoppedError.java | 27 -- .../sink/bigquery/error/UnknownError.java | 21 - .../BQDatasetLocationChangedException.java | 8 - .../exception/BQPartitionKeyNotSpecified.java | 7 - .../exception/BQSchemaMappingException.java | 7 - .../exception/BQTableUpdateFailure.java | 7 - .../exception/BigQuerySinkException.java | 7 - .../exception/ConfigurationException.java | 7 - .../exception/ProtoNotFoundException.java | 7 - .../bigquery/handler/BQTableDefinition.java | 64 --- .../sink/bigquery/handler/BigQueryClient.java | 195 --------- .../handler/BigQueryResponseParser.java | 71 --- .../sink/bigquery/handler/BigQueryRow.java | 14 - .../handler/BigQueryRowWithInsertId.java | 11 -
.../handler/BigQueryRowWithoutInsertId.java | 12 - .../sink/bigquery/models/BQField.java | 102 ----- .../sink/bigquery/models/Constants.java | 20 - .../sink/bigquery/models/MetadataUtil.java | 27 -- .../sink/bigquery/models/ProtoField.java | 85 ---- .../firehose/sink/bigquery/models/Record.java | 22 - .../sink/bigquery/models/Records.java | 13 - .../sink/bigquery/proto/DescriptorCache.java | 18 - .../sink/bigquery/proto/ProtoFieldParser.java | 44 -- .../sink/bigquery/proto/ProtoMapper.java | 60 --- .../bigquery/proto/ProtoUpdateListener.java | 140 ------ .../bigquery/proto/UnknownProtoFields.java | 21 - .../io/odpf/firehose/sink/blob/BlobSink.java | 16 +- .../firehose/sink/blob/BlobSinkFactory.java | 8 +- .../sink/blob/writer/WriterOrchestrator.java | 8 +- .../blob/writer/local/LocalFileChecker.java | 24 +- .../sink/blob/writer/local/LocalStorage.java | 10 +- .../writer/remote/BlobStorageChecker.java | 6 +- .../BlobStorageWriterFutureHandler.java | 18 +- .../sink/common/AbstractHttpSink.java | 18 +- .../{log => common}/KeyOrMessageParser.java | 2 +- .../blobstorage/BlobStorageException.java | 2 +- .../firehose/sink/dlq/DlqWriterFactory.java | 8 +- .../sink/dlq/kafka/KafkaDlqWriter.java | 16 +- .../firehose/sink/dlq/log/LogDlqWriter.java | 12 +- .../firehose/sink/elasticsearch/EsSink.java | 20 +- .../sink/elasticsearch/EsSinkFactory.java | 20 +- .../request/EsRequestHandlerFactory.java | 6 +- .../io/odpf/firehose/sink/grpc/GrpcSink.java | 14 +- .../firehose/sink/grpc/GrpcSinkFactory.java | 14 +- .../firehose/sink/grpc/client/GrpcClient.java | 10 +- .../io/odpf/firehose/sink/http/HttpSink.java | 12 +- .../firehose/sink/http/HttpSinkFactory.java | 12 +- .../sink/http/auth/OAuth2Credential.java | 14 +- .../sink/http/factory/SerializerFactory.java | 18 +- .../sink/http/request/RequestFactory.java | 11 +- .../request/create/BatchRequestCreator.java | 10 +- .../create/IndividualRequestCreator.java | 10 +- .../http/request/types/DynamicUrlRequest.java | 6 +- .../types/ParameterizedHeaderRequest.java | 6 +- .../types/ParameterizedUriRequest.java | 6 +- .../http/request/types/SimpleRequest.java | 8 +- .../firehose/sink/influxdb/InfluxSink.java | 14 +- .../sink/influxdb/InfluxSinkFactory.java | 12 +- .../io/odpf/firehose/sink/jdbc/JdbcSink.java | 18 +- .../firehose/sink/jdbc/JdbcSinkFactory.java | 12 +- .../io/odpf/firehose/sink/log/LogSink.java | 47 -- .../firehose/sink/log/LogSinkFactory.java | 35 -- .../odpf/firehose/sink/mongodb/MongoSink.java | 10 +- .../sink/mongodb/MongoSinkFactory.java | 26 +- .../sink/mongodb/client/MongoSinkClient.java | 60 +-- .../mongodb/client/MongoSinkClientUtil.java | 14 +- .../request/MongoRequestHandlerFactory.java | 6 +- .../mongodb/util/MongoSinkFactoryUtil.java | 8 +- .../firehose/sink/prometheus/PromSink.java | 12 +- .../sink/prometheus/PromSinkFactory.java | 10 +- .../sink/prometheus/request/PromRequest.java | 10 +- .../request/PromRequestCreator.java | 12 +- .../odpf/firehose/sink/redis/RedisSink.java | 10 +- .../firehose/sink/redis/RedisSinkFactory.java | 14 +- .../sink/redis/client/RedisClientFactory.java | 10 +- .../sink/redis/client/RedisClusterClient.java | 12 +- .../redis/client/RedisStandaloneClient.java | 14 +- .../dataentry/RedisHashSetFieldEntry.java | 8 +- .../redis/dataentry/RedisKeyValueEntry.java | 10 +- .../sink/redis/dataentry/RedisListEntry.java | 8 +- .../redis/parsers/RedisHashSetParser.java | 7 +- .../redis/parsers/RedisKeyValueParser.java | 8 +- .../sink/redis/parsers/RedisListParser.java | 6 +- .../redis/parsers/RedisParserFactory.java | 3 
+- .../odpf/firehose/sinkdecorator/BackOff.java | 8 +- .../ExponentialBackOffProvider.java | 14 +- .../firehose/sinkdecorator/SinkFinal.java | 12 +- .../firehose/sinkdecorator/SinkWithDlq.java | 22 +- .../firehose/sinkdecorator/SinkWithRetry.java | 26 +- .../firehose/utils/ConsumerRebalancer.java | 8 +- .../io/odpf/firehose/utils/KafkaUtils.java | 12 +- .../HttpSinkDataFormatTypeConverterTest.java | 2 +- .../ConsumerAndOffsetManagerTest.java | 10 +- .../consumer/FirehoseAsyncConsumerTest.java | 12 +- .../firehose/consumer/FirehoseFilterTest.java | 14 +- .../consumer/FirehoseSyncConsumerTest.java | 37 +- .../kafka/FirehoseKafkaConsumerTest.java | 32 +- .../odpf/firehose/error/ErrorHandlerTest.java | 2 + .../odpf/firehose/filter/NoOpFilterTest.java | 12 +- .../firehose/filter/jexl/JexlFilterTest.java | 18 +- .../firehose/filter/json/JsonFilterTest.java | 56 +-- .../filter/json/JsonFilterUtilTest.java | 36 +- .../io/odpf/firehose/launch/TaskTest.java | 10 +- .../message/FirehoseMessageUtilsTest.java | 73 ++++ .../io/odpf/firehose/message/MessageTest.java | 2 +- ....java => FirehoseInstrumentationTest.java} | 69 +-- .../serializer/MessageToJsonTest.java | 3 - .../MessageToTemplatizedJsonTest.java | 18 +- .../odpf/firehose/sink/AbstractSinkTest.java | 97 +++-- .../firehose/sink/GenericOdpfSinkTest.java | 85 ++++ .../firehose/sink/SinkFactoryUtilsTest.java | 24 ++ .../sink/bigquery/BigQuerySinkTest.java | 210 --------- .../sink/bigquery/BigquerySinkUtilsTest.java | 31 ++ .../firehose/sink/bigquery/MessageUtils.java | 89 ---- .../firehose/sink/bigquery/OffsetInfo.java | 17 - .../converter/MessageRecordConverterTest.java | 283 ------------ .../bigquery/converter/RowMapperTest.java | 385 ----------------- .../converter/fields/ByteProtoFieldTest.java | 51 --- .../fields/DefaultProtoFieldTest.java | 31 -- .../converter/fields/EnumProtoFieldTest.java | 56 --- .../fields/NestedProtoFieldTest.java | 43 -- .../fields/StructProtoFieldTest.java | 99 ----- .../fields/TimestampProtoFieldTest.java | 46 -- .../sink/bigquery/handler/BQClientTest.java | 408 ------------------ .../handler/BQTableDefinitionTest.java | 132 ------ .../handler/BigQueryResponseParserTest.java | 104 ----- .../handler/BigQueryRowWithInsertIdTest.java | 27 -- .../BigQueryRowWithoutInsertIdTest.java | 26 -- .../sink/bigquery/models/BQFieldTest.java | 295 ------------- .../sink/bigquery/models/ProtoFieldTest.java | 72 ---- .../sink/bigquery/models/RecordTest.java | 30 -- .../bigquery/proto/ProtoFieldParserTest.java | 181 -------- .../sink/bigquery/proto/ProtoMapperTest.java | 355 --------------- .../proto/ProtoUpdateListenerTest.java | 269 ------------ .../sink/bigquery/proto/ProtoUtil.java | 65 --- .../odpf/firehose/sink/blob/BlobSinkTest.java | 17 +- .../blob/message/MessageDeSerializerTest.java | 2 +- ...etadataProtoFirehoseMessageUtilsTest.java} | 2 +- .../blob/writer/WriterOrchestratorTest.java | 4 +- .../writer/local/LocalFileCheckerTest.java | 18 +- .../blob/writer/local/LocalStorageTest.java | 10 +- .../local/TimePartitionedPathUtilsTest.java | 2 +- .../policy/SizeBasedRotatingPolicyTest.java | 2 +- .../policy/TimeBasedRotatingPolicyTest.java | 2 +- .../writer/remote/BlobStorageCheckerTest.java | 17 +- .../BlobStorageWriterFutureHandlerTest.java | 25 +- .../KeyOrMessageParserTest.java | 4 +- .../firehose/sink/dlq/KafkaDlqWriterTest.java | 16 +- .../blobstorage/BlobStorageDlqWriterTest.java | 11 +- .../sink/elasticsearch/EsSinkFactoryTest.java | 22 +- .../sink/elasticsearch/EsSinkTest.java | 44 +- 
.../request/ESUpdateRequestHandlerTest.java | 11 +- .../request/ESUpsertRequestHandlerTest.java | 10 +- .../request/EsRequestHandlerFactoryTest.java | 12 +- .../firehose/sink/grpc/GrpcClientTest.java | 30 +- .../sink/grpc/GrpcSinkFactoryTest.java | 4 +- .../odpf/firehose/sink/grpc/GrpcSinkTest.java | 35 +- .../sink/http/HttpSinkFactoryTest.java | 4 +- .../odpf/firehose/sink/http/HttpSinkTest.java | 90 ++-- .../sink/http/auth/OAuth2CredentialTest.java | 6 +- .../sink/http/request/RequestFactoryTest.java | 2 +- .../sink/http/request/body/JsonBodyTest.java | 2 +- .../create/BatchRequestCreatorTest.java | 29 +- .../create/IndividualRequestCreatorTest.java | 39 +- .../request/types/DynamicUrlRequestTest.java | 7 +- .../types/ParameterizedHeaderRequestTest.java | 7 +- .../types/ParameterizedUriRequestTest.java | 7 +- .../http/request/types/SimpleRequestTest.java | 7 +- .../sink/http/request/uri/UriParserTest.java | 3 +- .../sink/influxdb/InfluxSinkTest.java | 38 +- .../jdbc/HikariJdbcConnectionPoolTest.java | 2 +- .../odpf/firehose/sink/jdbc/JdbcSinkTest.java | 54 +-- .../firehose/sink/jdbc/QueryTemplateTest.java | 2 +- .../firehose/sink/log/LogSinkFactoryTest.java | 38 -- .../odpf/firehose/sink/log/LogSinkTest.java | 86 ---- .../sink/mongodb/MongoSinkFactoryTest.java | 16 +- .../firehose/sink/mongodb/MongoSinkTest.java | 12 +- .../mongodb/client/MongoSinkClientTest.java | 104 ++--- .../client/MongoSinkClientUtilTest.java | 8 +- .../MongoRequestHandlerFactoryTest.java | 22 +- .../MongoUpdateRequestHandlerTest.java | 10 +- .../MongoUpsertRequestHandlerTest.java | 10 +- .../util/MongoSinkFactoryUtilTest.java | 22 +- .../sink/prometheus/PromSinkFactoryTest.java | 4 +- .../sink/prometheus/PromSinkTest.java | 60 +-- .../request/PromRequestCreatorTest.java | 2 +- .../prometheus/request/PromRequestTest.java | 6 +- .../sink/redis/RedisSinkFactoryTest.java | 2 +- .../firehose/sink/redis/RedisSinkTest.java | 48 +-- .../redis/client/RedisClientFactoryTest.java | 4 +- .../redis/client/RedisClusterClientTest.java | 20 +- .../client/RedisStandaloneClientTest.java | 22 +- .../dataentry/RedisHashSetFieldEntryTest.java | 24 +- .../dataentry/RedisKeyValueEntryTest.java | 24 +- .../redis/dataentry/RedisListEntryTest.java | 24 +- .../redis/parsers/RedisHashSetParserTest.java | 4 +- .../parsers/RedisKeyValueParserTest.java | 8 +- .../redis/parsers/RedisListParserTest.java | 6 +- .../redis/parsers/RedisParserFactoryTest.java | 4 +- .../sink/redis/ttl/DurationTTLTest.java | 2 +- .../ExponentialBackOffProviderTest.java | 24 +- .../firehose/sinkdecorator/SinkFinalTest.java | 10 +- .../sinkdecorator/SinkWithDlqTest.java | 63 ++- .../SinkWithFailHandlerTest.java | 6 +- .../sinkdecorator/SinkWithRetryTest.java | 72 ++-- 253 files changed, 1842 insertions(+), 7074 deletions(-) delete mode 100644 src/main/java/io/odpf/firehose/error/ErrorInfo.java delete mode 100644 src/main/java/io/odpf/firehose/error/ErrorType.java create mode 100644 src/main/java/io/odpf/firehose/message/FirehoseMessageUtils.java create mode 100644 src/main/java/io/odpf/firehose/metrics/FirehoseInstrumentation.java delete mode 100644 src/main/java/io/odpf/firehose/metrics/Instrumentation.java delete mode 100644 src/main/java/io/odpf/firehose/metrics/StatsDReporter.java delete mode 100644 src/main/java/io/odpf/firehose/metrics/StatsDReporterFactory.java create mode 100644 src/main/java/io/odpf/firehose/sink/GenericOdpfSink.java create mode 100644 src/main/java/io/odpf/firehose/sink/SinkFactoryUtils.java delete mode 100644 
src/main/java/io/odpf/firehose/sink/bigquery/BigQuerySink.java delete mode 100644 src/main/java/io/odpf/firehose/sink/bigquery/BigQuerySinkFactory.java create mode 100644 src/main/java/io/odpf/firehose/sink/bigquery/BigquerySinkUtils.java delete mode 100644 src/main/java/io/odpf/firehose/sink/bigquery/converter/MessageRecordConverter.java delete mode 100644 src/main/java/io/odpf/firehose/sink/bigquery/converter/MessageRecordConverterCache.java delete mode 100644 src/main/java/io/odpf/firehose/sink/bigquery/converter/ProtoFieldFactory.java delete mode 100644 src/main/java/io/odpf/firehose/sink/bigquery/converter/RowMapper.java delete mode 100644 src/main/java/io/odpf/firehose/sink/bigquery/converter/fields/ByteProtoField.java delete mode 100644 src/main/java/io/odpf/firehose/sink/bigquery/converter/fields/DefaultProtoField.java delete mode 100644 src/main/java/io/odpf/firehose/sink/bigquery/converter/fields/EnumProtoField.java delete mode 100644 src/main/java/io/odpf/firehose/sink/bigquery/converter/fields/NestedProtoField.java delete mode 100644 src/main/java/io/odpf/firehose/sink/bigquery/converter/fields/ProtoField.java delete mode 100644 src/main/java/io/odpf/firehose/sink/bigquery/converter/fields/StructProtoField.java delete mode 100644 src/main/java/io/odpf/firehose/sink/bigquery/converter/fields/TimestampProtoField.java delete mode 100644 src/main/java/io/odpf/firehose/sink/bigquery/error/ErrorDescriptor.java delete mode 100644 src/main/java/io/odpf/firehose/sink/bigquery/error/ErrorParser.java delete mode 100644 src/main/java/io/odpf/firehose/sink/bigquery/error/InvalidSchemaError.java delete mode 100644 src/main/java/io/odpf/firehose/sink/bigquery/error/OOBError.java delete mode 100644 src/main/java/io/odpf/firehose/sink/bigquery/error/StoppedError.java delete mode 100644 src/main/java/io/odpf/firehose/sink/bigquery/error/UnknownError.java delete mode 100644 src/main/java/io/odpf/firehose/sink/bigquery/exception/BQDatasetLocationChangedException.java delete mode 100644 src/main/java/io/odpf/firehose/sink/bigquery/exception/BQPartitionKeyNotSpecified.java delete mode 100644 src/main/java/io/odpf/firehose/sink/bigquery/exception/BQSchemaMappingException.java delete mode 100644 src/main/java/io/odpf/firehose/sink/bigquery/exception/BQTableUpdateFailure.java delete mode 100644 src/main/java/io/odpf/firehose/sink/bigquery/exception/BigQuerySinkException.java delete mode 100644 src/main/java/io/odpf/firehose/sink/bigquery/exception/ConfigurationException.java delete mode 100644 src/main/java/io/odpf/firehose/sink/bigquery/exception/ProtoNotFoundException.java delete mode 100644 src/main/java/io/odpf/firehose/sink/bigquery/handler/BQTableDefinition.java delete mode 100644 src/main/java/io/odpf/firehose/sink/bigquery/handler/BigQueryClient.java delete mode 100644 src/main/java/io/odpf/firehose/sink/bigquery/handler/BigQueryResponseParser.java delete mode 100644 src/main/java/io/odpf/firehose/sink/bigquery/handler/BigQueryRow.java delete mode 100644 src/main/java/io/odpf/firehose/sink/bigquery/handler/BigQueryRowWithInsertId.java delete mode 100644 src/main/java/io/odpf/firehose/sink/bigquery/handler/BigQueryRowWithoutInsertId.java delete mode 100644 src/main/java/io/odpf/firehose/sink/bigquery/models/BQField.java delete mode 100644 src/main/java/io/odpf/firehose/sink/bigquery/models/Constants.java delete mode 100644 src/main/java/io/odpf/firehose/sink/bigquery/models/MetadataUtil.java delete mode 100644 src/main/java/io/odpf/firehose/sink/bigquery/models/ProtoField.java delete mode 100644 
src/main/java/io/odpf/firehose/sink/bigquery/models/Record.java delete mode 100644 src/main/java/io/odpf/firehose/sink/bigquery/models/Records.java delete mode 100644 src/main/java/io/odpf/firehose/sink/bigquery/proto/DescriptorCache.java delete mode 100644 src/main/java/io/odpf/firehose/sink/bigquery/proto/ProtoFieldParser.java delete mode 100644 src/main/java/io/odpf/firehose/sink/bigquery/proto/ProtoMapper.java delete mode 100644 src/main/java/io/odpf/firehose/sink/bigquery/proto/ProtoUpdateListener.java delete mode 100644 src/main/java/io/odpf/firehose/sink/bigquery/proto/UnknownProtoFields.java rename src/main/java/io/odpf/firehose/sink/{log => common}/KeyOrMessageParser.java (96%) delete mode 100644 src/main/java/io/odpf/firehose/sink/log/LogSink.java delete mode 100644 src/main/java/io/odpf/firehose/sink/log/LogSinkFactory.java create mode 100644 src/test/java/io/odpf/firehose/message/FirehoseMessageUtilsTest.java rename src/test/java/io/odpf/firehose/metrics/{InstrumentationTest.java => FirehoseInstrumentationTest.java} (67%) create mode 100644 src/test/java/io/odpf/firehose/sink/GenericOdpfSinkTest.java create mode 100644 src/test/java/io/odpf/firehose/sink/SinkFactoryUtilsTest.java delete mode 100644 src/test/java/io/odpf/firehose/sink/bigquery/BigQuerySinkTest.java create mode 100644 src/test/java/io/odpf/firehose/sink/bigquery/BigquerySinkUtilsTest.java delete mode 100644 src/test/java/io/odpf/firehose/sink/bigquery/MessageUtils.java delete mode 100644 src/test/java/io/odpf/firehose/sink/bigquery/OffsetInfo.java delete mode 100644 src/test/java/io/odpf/firehose/sink/bigquery/converter/MessageRecordConverterTest.java delete mode 100644 src/test/java/io/odpf/firehose/sink/bigquery/converter/RowMapperTest.java delete mode 100644 src/test/java/io/odpf/firehose/sink/bigquery/converter/fields/ByteProtoFieldTest.java delete mode 100644 src/test/java/io/odpf/firehose/sink/bigquery/converter/fields/DefaultProtoFieldTest.java delete mode 100644 src/test/java/io/odpf/firehose/sink/bigquery/converter/fields/EnumProtoFieldTest.java delete mode 100644 src/test/java/io/odpf/firehose/sink/bigquery/converter/fields/NestedProtoFieldTest.java delete mode 100644 src/test/java/io/odpf/firehose/sink/bigquery/converter/fields/StructProtoFieldTest.java delete mode 100644 src/test/java/io/odpf/firehose/sink/bigquery/converter/fields/TimestampProtoFieldTest.java delete mode 100644 src/test/java/io/odpf/firehose/sink/bigquery/handler/BQClientTest.java delete mode 100644 src/test/java/io/odpf/firehose/sink/bigquery/handler/BQTableDefinitionTest.java delete mode 100644 src/test/java/io/odpf/firehose/sink/bigquery/handler/BigQueryResponseParserTest.java delete mode 100644 src/test/java/io/odpf/firehose/sink/bigquery/handler/BigQueryRowWithInsertIdTest.java delete mode 100644 src/test/java/io/odpf/firehose/sink/bigquery/handler/BigQueryRowWithoutInsertIdTest.java delete mode 100644 src/test/java/io/odpf/firehose/sink/bigquery/models/BQFieldTest.java delete mode 100644 src/test/java/io/odpf/firehose/sink/bigquery/models/ProtoFieldTest.java delete mode 100644 src/test/java/io/odpf/firehose/sink/bigquery/models/RecordTest.java delete mode 100644 src/test/java/io/odpf/firehose/sink/bigquery/proto/ProtoFieldParserTest.java delete mode 100644 src/test/java/io/odpf/firehose/sink/bigquery/proto/ProtoMapperTest.java delete mode 100644 src/test/java/io/odpf/firehose/sink/bigquery/proto/ProtoUpdateListenerTest.java delete mode 100644 src/test/java/io/odpf/firehose/sink/bigquery/proto/ProtoUtil.java rename 
src/test/java/io/odpf/firehose/sink/blob/proto/{KafkaMetadataProtoMessageUtilsTest.java => KafkaMetadataProtoFirehoseMessageUtilsTest.java} (97%) rename src/test/java/io/odpf/firehose/sink/{log => common}/KeyOrMessageParserTest.java (95%) delete mode 100644 src/test/java/io/odpf/firehose/sink/log/LogSinkFactoryTest.java delete mode 100644 src/test/java/io/odpf/firehose/sink/log/LogSinkTest.java diff --git a/build.gradle b/build.gradle index 0bf8a423a..57a20c308 100644 --- a/build.gradle +++ b/build.gradle @@ -48,7 +48,6 @@ repositories { } } - private Properties loadEnv() { Properties properties = new Properties() properties.load(new FileInputStream(file("${projectDir}/env/local.properties"))); @@ -86,7 +85,7 @@ dependencies { exclude group: "log4j", module: "log4j" } implementation 'io.confluent:monitoring-interceptors:3.0.0' - implementation "io.grpc:grpc-all:1.18.0" + implementation "io.grpc:grpc-all:1.38.0" implementation group: 'org.jfrog.buildinfo', name: 'build-info-extractor', version: '2.6.3' implementation group: 'com.google.gradle', name: 'osdetector-gradle-plugin', version: '1.2.1' implementation group: 'org.apache.ivy', name: 'ivy', version: '2.2.0' @@ -102,12 +101,12 @@ dependencies { implementation 'com.google.cloud:google-cloud-storage:1.114.0' implementation 'com.google.cloud:google-cloud-bigquery:1.115.0' implementation 'org.apache.logging.log4j:log4j-core:2.17.1' - + implementation group: 'io.odpf', name: 'depot', version: '0.1.3' implementation group: 'com.networknt', name: 'json-schema-validator', version: '1.0.59' exclude group: 'org.slf4j' testImplementation group: 'junit', name: 'junit', version: '4.11' testImplementation 'org.hamcrest:hamcrest-all:1.3' - testImplementation 'org.mockito:mockito-core:2.0.99-beta' + testImplementation 'org.mockito:mockito-core:4.5.1' testImplementation "com.github.tomakehurst:wiremock:2.3.1" testImplementation group: 'io.opentracing', name: 'opentracing-mock', version: '0.33.0' testImplementation group: 'org.mock-server', name: 'mockserver-netty', version: '3.10.5' @@ -133,7 +132,7 @@ protobuf { task.generateDescriptorSet = true task.descriptorSetOptions.includeSourceInfo = false task.descriptorSetOptions.includeImports = true - task.descriptorSetOptions.path = "$projectDir/src/test/resources/__files/descriptors.bin" + task.descriptorSetOptions.path = "$projectDir/src/test/resources/__files/descriptors.bin" } } } @@ -157,17 +156,17 @@ test { clean { delete "$projectDir/src/test/resources/__files" } - jar { - manifest { - attributes 'Main-Class': 'io.odpf.firehose.launch.Main' - duplicatesStrategy = 'exclude' - } - from { - configurations.runtimeClasspath.collect { it.isDirectory() ? it : zipTree(it) } - } + manifest { + attributes 'Main-Class': 'io.odpf.firehose.launch.Main' + duplicatesStrategy = 'exclude' + zip64 = true + } + from { + configurations.runtimeClasspath.collect { it.isDirectory() ? it : zipTree(it) } + } + exclude('META-INF/*.RSA', 'META-INF/*.SF', 'META-INF/*.DSA') } - publishing { publications { maven(MavenPublication) { diff --git a/docs/docs/concepts/filters.md b/docs/docs/concepts/filters.md index cf713eaaf..ff032daf9 100644 --- a/docs/docs/concepts/filters.md +++ b/docs/docs/concepts/filters.md @@ -48,11 +48,11 @@ You can read more about JSON Schema [here](https://json-schema.org/). For more d The filtering occurs in the following steps - -- JSON filter configurations are validated and logged to instrumentation by JsonFilterUtil. 
In case any configuration is invalid, then IllegalArgumentException is thrown and Firehose is terminated. +- JSON filter configurations are validated and logged to firehoseInstrumentation by JsonFilterUtil. In case any configuration is invalid, then IllegalArgumentException is thrown and Firehose is terminated. - If `FILTER_ESB_MESSAGE_FORMAT=PROTOBUF`, then the serialized key/message protobuf byte array is deserialized to POJO object by the Proto schema class. It is then converted to a JSON string so that it can be parsed by the JSON Schema Validator. - If`FILTER_ESB_MESSAGE_FORMAT=JSON`, then the serialized JSON byte array is deserialized to a JSON message string. - The JSON Schema validator performs a validation on the JSON message against the filter rules specified in the JSON Schema string provided in the environment variable`FILTER_JSON_SCHEMA.` -- If there are any validation errors, then that key/message is filtered out and the validation errors are logged to the instrumentation in debug mode. +- If there are any validation errors, then that key/message is filtered out and the validation errors are logged to the firehoseInstrumentation in debug mode. - If all validation checks pass, then the key/message is added to the ArrayList of filtered messages and returned by the JsonFilter. ## Why Use Filters diff --git a/src/main/java/io/odpf/firehose/config/ErrorConfig.java b/src/main/java/io/odpf/firehose/config/ErrorConfig.java index f9c731eb1..4568af826 100644 --- a/src/main/java/io/odpf/firehose/config/ErrorConfig.java +++ b/src/main/java/io/odpf/firehose/config/ErrorConfig.java @@ -1,7 +1,7 @@ package io.odpf.firehose.config; +import io.odpf.depot.error.ErrorType; import io.odpf.firehose.config.converter.SetErrorTypeConverter; -import io.odpf.firehose.error.ErrorType; import org.aeonbits.owner.Config; import org.aeonbits.owner.Mutable; diff --git a/src/main/java/io/odpf/firehose/config/converter/SetErrorTypeConverter.java b/src/main/java/io/odpf/firehose/config/converter/SetErrorTypeConverter.java index 5ac0532ee..3b121593d 100644 --- a/src/main/java/io/odpf/firehose/config/converter/SetErrorTypeConverter.java +++ b/src/main/java/io/odpf/firehose/config/converter/SetErrorTypeConverter.java @@ -1,6 +1,6 @@ package io.odpf.firehose.config.converter; -import io.odpf.firehose.error.ErrorType; +import io.odpf.depot.error.ErrorType; import org.aeonbits.owner.Converter; import java.lang.reflect.Method; diff --git a/src/main/java/io/odpf/firehose/consumer/FirehoseAsyncConsumer.java b/src/main/java/io/odpf/firehose/consumer/FirehoseAsyncConsumer.java index b2216e665..45f9d8190 100644 --- a/src/main/java/io/odpf/firehose/consumer/FirehoseAsyncConsumer.java +++ b/src/main/java/io/odpf/firehose/consumer/FirehoseAsyncConsumer.java @@ -3,10 +3,10 @@ import io.odpf.firehose.consumer.kafka.ConsumerAndOffsetManager; import io.odpf.firehose.exception.FirehoseConsumerFailedException; import io.odpf.firehose.message.Message; +import io.odpf.firehose.metrics.FirehoseInstrumentation; import io.odpf.firehose.sink.SinkPool; import io.odpf.firehose.filter.FilterException; import io.odpf.firehose.filter.FilteredMessages; -import io.odpf.firehose.metrics.Instrumentation; import io.odpf.firehose.tracer.SinkTracer; import io.opentracing.Span; import lombok.AllArgsConstructor; @@ -24,7 +24,7 @@ public class FirehoseAsyncConsumer implements FirehoseConsumer { private final SinkTracer tracer; private final ConsumerAndOffsetManager consumerAndOffsetManager; private final FirehoseFilter firehoseFilter; - private final 
Instrumentation instrumentation; + private final FirehoseInstrumentation firehoseInstrumentation; @Override public void process() { @@ -47,7 +47,7 @@ public void process() { } catch (FilterException e) { throw new FirehoseConsumerFailedException(e); } finally { - instrumentation.captureDurationSince(SOURCE_KAFKA_PARTITIONS_PROCESS_TIME_MILLISECONDS, beforeCall); + firehoseInstrumentation.captureDurationSince(SOURCE_KAFKA_PARTITIONS_PROCESS_TIME_MILLISECONDS, beforeCall); } } @@ -55,10 +55,10 @@ private Future> scheduleTask(List messages) { while (true) { Future> scheduledTask = sinkPool.submitTask(messages); if (scheduledTask == null) { - instrumentation.logInfo("The Queue is full"); + firehoseInstrumentation.logInfo("The Queue is full"); sinkPool.fetchFinishedSinkTasks().forEach(consumerAndOffsetManager::setCommittable); } else { - instrumentation.logInfo("Adding sink task"); + firehoseInstrumentation.logInfo("Adding sink task"); return scheduledTask; } } @@ -69,6 +69,6 @@ public void close() throws IOException { consumerAndOffsetManager.close(); tracer.close(); sinkPool.close(); - instrumentation.close(); + firehoseInstrumentation.close(); } } diff --git a/src/main/java/io/odpf/firehose/consumer/FirehoseConsumerFactory.java b/src/main/java/io/odpf/firehose/consumer/FirehoseConsumerFactory.java index e15f510ec..1eade3ec8 100644 --- a/src/main/java/io/odpf/firehose/consumer/FirehoseConsumerFactory.java +++ b/src/main/java/io/odpf/firehose/consumer/FirehoseConsumerFactory.java @@ -1,9 +1,11 @@ package io.odpf.firehose.consumer; import io.jaegertracing.Configuration; +import io.odpf.depot.metrics.StatsDReporter; import io.odpf.firehose.consumer.kafka.ConsumerAndOffsetManager; import io.odpf.firehose.consumer.kafka.FirehoseKafkaConsumer; import io.odpf.firehose.consumer.kafka.OffsetManager; +import io.odpf.firehose.metrics.FirehoseInstrumentation; import io.odpf.firehose.sink.SinkFactory; import io.odpf.firehose.utils.KafkaUtils; import io.odpf.firehose.config.AppConfig; @@ -19,10 +21,8 @@ import io.odpf.firehose.filter.jexl.JexlFilter; import io.odpf.firehose.filter.json.JsonFilter; import io.odpf.firehose.filter.json.JsonFilterUtil; -import io.odpf.firehose.metrics.Instrumentation; -import io.odpf.firehose.metrics.StatsDReporter; import io.odpf.firehose.sink.Sink; -import io.odpf.firehose.sink.log.KeyOrMessageParser; +import io.odpf.firehose.sink.common.KeyOrMessageParser; import io.odpf.firehose.sinkdecorator.BackOff; import io.odpf.firehose.sinkdecorator.BackOffProvider; import io.odpf.firehose.error.ErrorHandler; @@ -58,7 +58,7 @@ public class FirehoseConsumerFactory { private final Map config = System.getenv(); private final StatsDReporter statsDReporter; private final StencilClient stencilClient; - private final Instrumentation instrumentation; + private final FirehoseInstrumentation firehoseInstrumentation; private final KeyOrMessageParser parser; private final OffsetManager offsetManager = new OffsetManager(); @@ -71,14 +71,14 @@ public class FirehoseConsumerFactory { public FirehoseConsumerFactory(KafkaConsumerConfig kafkaConsumerConfig, StatsDReporter statsDReporter) { this.kafkaConsumerConfig = kafkaConsumerConfig; this.statsDReporter = statsDReporter; - instrumentation = new Instrumentation(this.statsDReporter, FirehoseConsumerFactory.class); + firehoseInstrumentation = new FirehoseInstrumentation(this.statsDReporter, FirehoseConsumerFactory.class); String additionalConsumerConfig = String.format("" + "\n\tEnable Async Commit: %s" + "\n\tCommit Only Current Partition: %s", 
this.kafkaConsumerConfig.isSourceKafkaAsyncCommitEnable(), this.kafkaConsumerConfig.isSourceKafkaCommitOnlyCurrentPartitionsEnable()); - instrumentation.logDebug(additionalConsumerConfig); + firehoseInstrumentation.logDebug(additionalConsumerConfig); String stencilUrl = this.kafkaConsumerConfig.getSchemaRegistryStencilUrls(); stencilClient = this.kafkaConsumerConfig.isSchemaRegistryStencilEnable() @@ -88,25 +88,25 @@ public FirehoseConsumerFactory(KafkaConsumerConfig kafkaConsumerConfig, StatsDRe } private FirehoseFilter buildFilter(FilterConfig filterConfig) { - instrumentation.logInfo("Filter Engine: {}", filterConfig.getFilterEngine()); + firehoseInstrumentation.logInfo("Filter Engine: {}", filterConfig.getFilterEngine()); Filter filter; switch (filterConfig.getFilterEngine()) { case JSON: - Instrumentation jsonFilterUtilInstrumentation = new Instrumentation(statsDReporter, JsonFilterUtil.class); - JsonFilterUtil.logConfigs(filterConfig, jsonFilterUtilInstrumentation); - JsonFilterUtil.validateConfigs(filterConfig, jsonFilterUtilInstrumentation); - filter = new JsonFilter(stencilClient, filterConfig, new Instrumentation(statsDReporter, JsonFilter.class)); + FirehoseInstrumentation jsonFilterUtilFirehoseInstrumentation = new FirehoseInstrumentation(statsDReporter, JsonFilterUtil.class); + JsonFilterUtil.logConfigs(filterConfig, jsonFilterUtilFirehoseInstrumentation); + JsonFilterUtil.validateConfigs(filterConfig, jsonFilterUtilFirehoseInstrumentation); + filter = new JsonFilter(stencilClient, filterConfig, new FirehoseInstrumentation(statsDReporter, JsonFilter.class)); break; case JEXL: - filter = new JexlFilter(filterConfig, new Instrumentation(statsDReporter, JexlFilter.class)); + filter = new JexlFilter(filterConfig, new FirehoseInstrumentation(statsDReporter, JexlFilter.class)); break; case NO_OP: - filter = new NoOpFilter(new Instrumentation(statsDReporter, NoOpFilter.class)); + filter = new NoOpFilter(new FirehoseInstrumentation(statsDReporter, NoOpFilter.class)); break; default: throw new IllegalArgumentException("Invalid filter engine type"); } - return new FirehoseFilter(filter, new Instrumentation(statsDReporter, FirehoseFilter.class)); + return new FirehoseFilter(filter, new FirehoseInstrumentation(statsDReporter, FirehoseFilter.class)); } /** @@ -128,13 +128,13 @@ public FirehoseConsumer buildConsumer() { sinkFactory.init(); if (kafkaConsumerConfig.getSourceKafkaConsumerMode().equals(KafkaConsumerMode.SYNC)) { Sink sink = createSink(tracer, sinkFactory); - ConsumerAndOffsetManager consumerAndOffsetManager = new ConsumerAndOffsetManager(Collections.singletonList(sink), offsetManager, firehoseKafkaConsumer, kafkaConsumerConfig, new Instrumentation(statsDReporter, ConsumerAndOffsetManager.class)); + ConsumerAndOffsetManager consumerAndOffsetManager = new ConsumerAndOffsetManager(Collections.singletonList(sink), offsetManager, firehoseKafkaConsumer, kafkaConsumerConfig, new FirehoseInstrumentation(statsDReporter, ConsumerAndOffsetManager.class)); return new FirehoseSyncConsumer( sink, firehoseTracer, consumerAndOffsetManager, firehoseFilter, - new Instrumentation(statsDReporter, FirehoseSyncConsumer.class)); + new FirehoseInstrumentation(statsDReporter, FirehoseSyncConsumer.class)); } else { SinkPoolConfig sinkPoolConfig = ConfigFactory.create(SinkPoolConfig.class, config); int nThreads = sinkPoolConfig.getSinkPoolNumThreads(); @@ -142,7 +142,7 @@ public FirehoseConsumer buildConsumer() { for (int ii = 0; ii < nThreads; ii++) { sinks.add(createSink(tracer, sinkFactory)); } - 
ConsumerAndOffsetManager consumerAndOffsetManager = new ConsumerAndOffsetManager(sinks, offsetManager, firehoseKafkaConsumer, kafkaConsumerConfig, new Instrumentation(statsDReporter, ConsumerAndOffsetManager.class)); + ConsumerAndOffsetManager consumerAndOffsetManager = new ConsumerAndOffsetManager(sinks, offsetManager, firehoseKafkaConsumer, kafkaConsumerConfig, new FirehoseInstrumentation(statsDReporter, ConsumerAndOffsetManager.class)); SinkPool sinkPool = new SinkPool( new LinkedBlockingQueue<>(sinks), Executors.newCachedThreadPool(), @@ -152,7 +152,7 @@ public FirehoseConsumer buildConsumer() { firehoseTracer, consumerAndOffsetManager, firehoseFilter, - new Instrumentation(statsDReporter, FirehoseAsyncConsumer.class)); + new FirehoseInstrumentation(statsDReporter, FirehoseAsyncConsumer.class)); } } @@ -162,7 +162,7 @@ private Sink createSink(Tracer tracer, SinkFactory sinkFactory) { Sink sinkWithFailHandler = new SinkWithFailHandler(baseSink, errorHandler); Sink sinkWithRetry = withRetry(sinkWithFailHandler, errorHandler); Sink sinWithDLQ = withDlq(sinkWithRetry, tracer, errorHandler); - return new SinkFinal(sinWithDLQ, new Instrumentation(statsDReporter, SinkFinal.class)); + return new SinkFinal(sinWithDLQ, new FirehoseInstrumentation(statsDReporter, SinkFinal.class)); } public Sink withDlq(Sink sink, Tracer tracer, ErrorHandler errorHandler) { @@ -178,7 +178,7 @@ public Sink withDlq(Sink sink, Tracer tracer, ErrorHandler errorHandler) { backOffProvider, dlqConfig, errorHandler, - new Instrumentation(statsDReporter, SinkWithDlq.class)); + new FirehoseInstrumentation(statsDReporter, SinkWithDlq.class)); } /** @@ -191,7 +191,7 @@ public Sink withDlq(Sink sink, Tracer tracer, ErrorHandler errorHandler) { private Sink withRetry(Sink sink, ErrorHandler errorHandler) { AppConfig appConfig = ConfigFactory.create(AppConfig.class, config); BackOffProvider backOffProvider = getBackOffProvider(); - return new SinkWithRetry(sink, backOffProvider, new Instrumentation(statsDReporter, SinkWithRetry.class), appConfig, parser, errorHandler); + return new SinkWithRetry(sink, backOffProvider, new FirehoseInstrumentation(statsDReporter, SinkWithRetry.class), appConfig, parser, errorHandler); } private BackOffProvider getBackOffProvider() { @@ -200,7 +200,7 @@ private BackOffProvider getBackOffProvider() { appConfig.getRetryExponentialBackoffInitialMs(), appConfig.getRetryExponentialBackoffRate(), appConfig.getRetryExponentialBackoffMaxMs(), - new Instrumentation(statsDReporter, ExponentialBackOffProvider.class), - new BackOff(new Instrumentation(statsDReporter, BackOff.class))); + new FirehoseInstrumentation(statsDReporter, ExponentialBackOffProvider.class), + new BackOff(new FirehoseInstrumentation(statsDReporter, BackOff.class))); } } diff --git a/src/main/java/io/odpf/firehose/consumer/FirehoseFilter.java b/src/main/java/io/odpf/firehose/consumer/FirehoseFilter.java index aa9c31482..4115795d4 100644 --- a/src/main/java/io/odpf/firehose/consumer/FirehoseFilter.java +++ b/src/main/java/io/odpf/firehose/consumer/FirehoseFilter.java @@ -4,7 +4,7 @@ import io.odpf.firehose.filter.Filter; import io.odpf.firehose.filter.FilterException; import io.odpf.firehose.filter.FilteredMessages; -import io.odpf.firehose.metrics.Instrumentation; +import io.odpf.firehose.metrics.FirehoseInstrumentation; import io.odpf.firehose.metrics.Metrics; import lombok.AllArgsConstructor; @@ -13,14 +13,14 @@ @AllArgsConstructor public class FirehoseFilter { private final Filter filter; - private final Instrumentation 
instrumentation; + private final FirehoseInstrumentation firehoseInstrumentation; public FilteredMessages applyFilter(List messages) throws FilterException { FilteredMessages filteredMessage = filter.filter(messages); int filteredMessageCount = filteredMessage.sizeOfInvalidMessages(); if (filteredMessageCount > 0) { - instrumentation.captureFilteredMessageCount(filteredMessageCount); - instrumentation.captureGlobalMessageMetrics(Metrics.MessageScope.FILTERED, filteredMessageCount); + firehoseInstrumentation.captureFilteredMessageCount(filteredMessageCount); + firehoseInstrumentation.captureGlobalMessageMetrics(Metrics.MessageScope.FILTERED, filteredMessageCount); } return filteredMessage; } diff --git a/src/main/java/io/odpf/firehose/consumer/FirehoseSyncConsumer.java b/src/main/java/io/odpf/firehose/consumer/FirehoseSyncConsumer.java index 19cedf0f4..ff96a1cfb 100644 --- a/src/main/java/io/odpf/firehose/consumer/FirehoseSyncConsumer.java +++ b/src/main/java/io/odpf/firehose/consumer/FirehoseSyncConsumer.java @@ -5,7 +5,7 @@ import io.odpf.firehose.message.Message; import io.odpf.firehose.filter.FilterException; import io.odpf.firehose.filter.FilteredMessages; -import io.odpf.firehose.metrics.Instrumentation; +import io.odpf.firehose.metrics.FirehoseInstrumentation; import io.odpf.firehose.sink.Sink; import io.odpf.firehose.tracer.SinkTracer; import io.opentracing.Span; @@ -27,7 +27,7 @@ public class FirehoseSyncConsumer implements FirehoseConsumer { private final SinkTracer tracer; private final ConsumerAndOffsetManager consumerAndOffsetManager; private final FirehoseFilter firehoseFilter; - private final Instrumentation instrumentation; + private final FirehoseInstrumentation firehoseInstrumentation; @Override public void process() throws IOException { @@ -44,12 +44,12 @@ public void process() throws IOException { consumerAndOffsetManager.addOffsetsAndSetCommittable(filteredMessages.getValidMessages()); } consumerAndOffsetManager.commit(); - instrumentation.logInfo("Processed {} records in consumer", messages.size()); + firehoseInstrumentation.logInfo("Processed {} records in consumer", messages.size()); tracer.finishTrace(spans); } catch (FilterException e) { throw new FirehoseConsumerFailedException(e); } finally { - instrumentation.captureDurationSince(SOURCE_KAFKA_PARTITIONS_PROCESS_TIME_MILLISECONDS, beforeCall); + firehoseInstrumentation.captureDurationSince(SOURCE_KAFKA_PARTITIONS_PROCESS_TIME_MILLISECONDS, beforeCall); } } @@ -57,7 +57,7 @@ public void process() throws IOException { public void close() throws IOException { tracer.close(); consumerAndOffsetManager.close(); - instrumentation.close(); + firehoseInstrumentation.close(); sink.close(); } } diff --git a/src/main/java/io/odpf/firehose/consumer/kafka/ConsumerAndOffsetManager.java b/src/main/java/io/odpf/firehose/consumer/kafka/ConsumerAndOffsetManager.java index d4d8db308..161b30855 100644 --- a/src/main/java/io/odpf/firehose/consumer/kafka/ConsumerAndOffsetManager.java +++ b/src/main/java/io/odpf/firehose/consumer/kafka/ConsumerAndOffsetManager.java @@ -2,7 +2,7 @@ import io.odpf.firehose.config.KafkaConsumerConfig; import io.odpf.firehose.message.Message; -import io.odpf.firehose.metrics.Instrumentation; +import io.odpf.firehose.metrics.FirehoseInstrumentation; import io.odpf.firehose.sink.Sink; import java.io.IOException; @@ -34,7 +34,7 @@ public class ConsumerAndOffsetManager implements AutoCloseable { private final List sinks; private final FirehoseKafkaConsumer firehoseKafkaConsumer; private final 
KafkaConsumerConfig kafkaConsumerConfig; - private final Instrumentation instrumentation; + private final FirehoseInstrumentation firehoseInstrumentation; private final boolean canSinkManageOffsets; private long lastCommitTimeStamp = 0; @@ -43,12 +43,12 @@ public ConsumerAndOffsetManager( OffsetManager offsetManager, FirehoseKafkaConsumer firehoseKafkaConsumer, KafkaConsumerConfig kafkaConsumerConfig, - Instrumentation instrumentation) { + FirehoseInstrumentation firehoseInstrumentation) { this.sinks = sinks; this.offsetManager = offsetManager; this.firehoseKafkaConsumer = firehoseKafkaConsumer; this.kafkaConsumerConfig = kafkaConsumerConfig; - this.instrumentation = instrumentation; + this.firehoseInstrumentation = firehoseInstrumentation; this.canSinkManageOffsets = sinks.get(0).canManageOffsets(); } @@ -99,7 +99,7 @@ public void commit() { @Override public void close() throws IOException { if (firehoseKafkaConsumer != null) { - instrumentation.logInfo("closing consumer"); + firehoseInstrumentation.logInfo("closing consumer"); firehoseKafkaConsumer.close(); } } diff --git a/src/main/java/io/odpf/firehose/consumer/kafka/FirehoseKafkaConsumer.java b/src/main/java/io/odpf/firehose/consumer/kafka/FirehoseKafkaConsumer.java index f99607655..8dd38f4df 100644 --- a/src/main/java/io/odpf/firehose/consumer/kafka/FirehoseKafkaConsumer.java +++ b/src/main/java/io/odpf/firehose/consumer/kafka/FirehoseKafkaConsumer.java @@ -1,7 +1,7 @@ package io.odpf.firehose.consumer.kafka; import io.odpf.firehose.config.KafkaConsumerConfig; -import io.odpf.firehose.metrics.Instrumentation; +import io.odpf.firehose.metrics.FirehoseInstrumentation; import io.odpf.firehose.metrics.Metrics; import io.odpf.firehose.message.Message; import org.apache.kafka.clients.consumer.Consumer; @@ -28,20 +28,20 @@ public class FirehoseKafkaConsumer implements AutoCloseable { private final Consumer kafkaConsumer; private final KafkaConsumerConfig consumerConfig; - private final Instrumentation instrumentation; + private final FirehoseInstrumentation firehoseInstrumentation; private final Map committedOffsets = new ConcurrentHashMap<>(); /** * A Constructor. * - * @param kafkaConsumer {@see KafkaConsumer} - * @param config Consumer configuration. - * @param instrumentation Contain logging and metrics collection + * @param kafkaConsumer {@see KafkaConsumer} + * @param config Consumer configuration. 
+ * @param firehoseInstrumentation Contain logging and metrics collection */ - public FirehoseKafkaConsumer(Consumer kafkaConsumer, KafkaConsumerConfig config, Instrumentation instrumentation) { + public FirehoseKafkaConsumer(Consumer kafkaConsumer, KafkaConsumerConfig config, FirehoseInstrumentation firehoseInstrumentation) { this.kafkaConsumer = kafkaConsumer; this.consumerConfig = config; - this.instrumentation = instrumentation; + this.firehoseInstrumentation = firehoseInstrumentation; } /** @@ -51,24 +51,24 @@ public FirehoseKafkaConsumer(Consumer kafkaConsumer, KafkaConsum */ public List readMessages() { ConsumerRecords records = kafkaConsumer.poll(Duration.ofMillis(consumerConfig.getSourceKafkaPollTimeoutMs())); - instrumentation.logInfo("Pulled {} messages", records.count()); - instrumentation.capturePulledMessageHistogram(records.count()); - instrumentation.captureGlobalMessageMetrics(Metrics.MessageScope.CONSUMER, records.count()); + firehoseInstrumentation.logInfo("Pulled {} messages", records.count()); + firehoseInstrumentation.capturePulledMessageHistogram(records.count()); + firehoseInstrumentation.captureGlobalMessageMetrics(Metrics.MessageScope.CONSUMER, records.count()); List messages = new ArrayList<>(); for (ConsumerRecord record : records) { messages.add(new Message(record.key(), record.value(), record.topic(), record.partition(), record.offset(), record.headers(), record.timestamp(), System.currentTimeMillis())); - instrumentation.logDebug("Pulled record: {}", record); + firehoseInstrumentation.logDebug("Pulled record: {}", record); } return messages; } public void close() { try { - instrumentation.logInfo("Consumer is closing"); + firehoseInstrumentation.logInfo("Consumer is closing"); this.kafkaConsumer.close(); } catch (Exception e) { - instrumentation.captureNonFatalError(e, "Exception while closing consumer"); + firehoseInstrumentation.captureNonFatalError("firehose_error_event", e, "Exception while closing consumer"); } } @@ -76,9 +76,9 @@ public void commit() { if (consumerConfig.isSourceKafkaAsyncCommitEnable()) { kafkaConsumer.commitAsync((offsets, exception) -> { if (exception != null) { - instrumentation.incrementCounter(SOURCE_KAFKA_MESSAGES_COMMIT_TOTAL, FAILURE_TAG); + firehoseInstrumentation.incrementCounter(SOURCE_KAFKA_MESSAGES_COMMIT_TOTAL, FAILURE_TAG); } else { - instrumentation.incrementCounter(SOURCE_KAFKA_MESSAGES_COMMIT_TOTAL, SUCCESS_TAG); + firehoseInstrumentation.incrementCounter(SOURCE_KAFKA_MESSAGES_COMMIT_TOTAL, SUCCESS_TAG); } }); } else { @@ -97,7 +97,7 @@ public void commit(Map offsets) { return; } latestOffsets.forEach((k, v) -> - instrumentation.logInfo("Committing Offsets " + k.topic() + ":" + k.partition() + "=>" + v.offset())); + firehoseInstrumentation.logInfo("Committing Offsets " + k.topic() + ":" + k.partition() + "=>" + v.offset())); if (consumerConfig.isSourceKafkaAsyncCommitEnable()) { commitAsync(latestOffsets); } else { @@ -112,9 +112,9 @@ private void commitAsync(Map offsets) { private void onComplete(Map offsets, Exception exception) { if (exception != null) { - instrumentation.incrementCounter(SOURCE_KAFKA_MESSAGES_COMMIT_TOTAL, FAILURE_TAG); + firehoseInstrumentation.incrementCounter(SOURCE_KAFKA_MESSAGES_COMMIT_TOTAL, FAILURE_TAG); } else { - instrumentation.incrementCounter(SOURCE_KAFKA_MESSAGES_COMMIT_TOTAL, SUCCESS_TAG); + firehoseInstrumentation.incrementCounter(SOURCE_KAFKA_MESSAGES_COMMIT_TOTAL, SUCCESS_TAG); } } } diff --git a/src/main/java/io/odpf/firehose/error/ErrorHandler.java 
b/src/main/java/io/odpf/firehose/error/ErrorHandler.java index 9dc25be6d..685c85259 100644 --- a/src/main/java/io/odpf/firehose/error/ErrorHandler.java +++ b/src/main/java/io/odpf/firehose/error/ErrorHandler.java @@ -1,5 +1,6 @@ package io.odpf.firehose.error; +import io.odpf.depot.error.ErrorType; import io.odpf.firehose.config.ErrorConfig; import io.odpf.firehose.message.Message; import lombok.AllArgsConstructor; diff --git a/src/main/java/io/odpf/firehose/error/ErrorInfo.java b/src/main/java/io/odpf/firehose/error/ErrorInfo.java deleted file mode 100644 index 0dbd96640..000000000 --- a/src/main/java/io/odpf/firehose/error/ErrorInfo.java +++ /dev/null @@ -1,15 +0,0 @@ -package io.odpf.firehose.error; - -import lombok.AllArgsConstructor; -import lombok.Data; - -@AllArgsConstructor -@Data -public class ErrorInfo { - private Exception exception; - private ErrorType errorType; - - public String toString() { - return errorType.name(); - } -} diff --git a/src/main/java/io/odpf/firehose/error/ErrorType.java b/src/main/java/io/odpf/firehose/error/ErrorType.java deleted file mode 100644 index d232fb68e..000000000 --- a/src/main/java/io/odpf/firehose/error/ErrorType.java +++ /dev/null @@ -1,11 +0,0 @@ -package io.odpf.firehose.error; - -public enum ErrorType { - DESERIALIZATION_ERROR, - INVALID_MESSAGE_ERROR, - UNKNOWN_FIELDS_ERROR, - SINK_4XX_ERROR, - SINK_5XX_ERROR, - SINK_UNKNOWN_ERROR, - DEFAULT_ERROR -} diff --git a/src/main/java/io/odpf/firehose/filter/NoOpFilter.java b/src/main/java/io/odpf/firehose/filter/NoOpFilter.java index 6bd0c8c7c..a8f1af431 100644 --- a/src/main/java/io/odpf/firehose/filter/NoOpFilter.java +++ b/src/main/java/io/odpf/firehose/filter/NoOpFilter.java @@ -1,14 +1,14 @@ package io.odpf.firehose.filter; import io.odpf.firehose.message.Message; -import io.odpf.firehose.metrics.Instrumentation; +import io.odpf.firehose.metrics.FirehoseInstrumentation; import java.util.List; public class NoOpFilter implements Filter { - public NoOpFilter(Instrumentation instrumentation) { - instrumentation.logInfo("No filter is selected"); + public NoOpFilter(FirehoseInstrumentation firehoseInstrumentation) { + firehoseInstrumentation.logInfo("No filter is selected"); } /** diff --git a/src/main/java/io/odpf/firehose/filter/jexl/JexlFilter.java b/src/main/java/io/odpf/firehose/filter/jexl/JexlFilter.java index 254def526..acab2d856 100644 --- a/src/main/java/io/odpf/firehose/filter/jexl/JexlFilter.java +++ b/src/main/java/io/odpf/firehose/filter/jexl/JexlFilter.java @@ -6,7 +6,7 @@ import io.odpf.firehose.filter.Filter; import io.odpf.firehose.filter.FilterException; import io.odpf.firehose.filter.FilteredMessages; -import io.odpf.firehose.metrics.Instrumentation; +import io.odpf.firehose.metrics.FirehoseInstrumentation; import org.apache.commons.jexl2.Expression; import org.apache.commons.jexl2.JexlContext; import org.apache.commons.jexl2.JexlEngine; @@ -35,18 +35,18 @@ public class JexlFilter implements Filter { * Instantiates a new Message filter. 
* * @param filterConfig the consumer config - * @param instrumentation the instrumentation + * @param firehoseInstrumentation the instrumentation */ - public JexlFilter(FilterConfig filterConfig, Instrumentation instrumentation) { + public JexlFilter(FilterConfig filterConfig, FirehoseInstrumentation firehoseInstrumentation) { JexlEngine engine = new JexlEngine(); engine.setSilent(false); engine.setStrict(true); this.filterDataSourceType = filterConfig.getFilterDataSource(); this.protoSchema = filterConfig.getFilterSchemaProtoClass(); - instrumentation.logInfo("\n\tFilter type: {}", this.filterDataSourceType); + firehoseInstrumentation.logInfo("\n\tFilter type: {}", this.filterDataSourceType); this.expression = engine.createExpression(filterConfig.getFilterJexlExpression()); - instrumentation.logInfo("\n\tFilter schema: {}", this.protoSchema); - instrumentation.logInfo("\n\tFilter expression: {}", filterConfig.getFilterJexlExpression()); + firehoseInstrumentation.logInfo("\n\tFilter schema: {}", this.protoSchema); + firehoseInstrumentation.logInfo("\n\tFilter expression: {}", filterConfig.getFilterJexlExpression()); } /** diff --git a/src/main/java/io/odpf/firehose/filter/json/JsonFilter.java b/src/main/java/io/odpf/firehose/filter/json/JsonFilter.java index e8ecc7384..0ba501ee0 100644 --- a/src/main/java/io/odpf/firehose/filter/json/JsonFilter.java +++ b/src/main/java/io/odpf/firehose/filter/json/JsonFilter.java @@ -15,7 +15,7 @@ import io.odpf.firehose.filter.Filter; import io.odpf.firehose.filter.FilterException; import io.odpf.firehose.filter.FilteredMessages; -import io.odpf.firehose.metrics.Instrumentation; +import io.odpf.firehose.metrics.FirehoseInstrumentation; import io.odpf.stencil.client.StencilClient; import io.odpf.stencil.Parser; @@ -32,7 +32,7 @@ public class JsonFilter implements Filter { private final FilterConfig filterConfig; - private final Instrumentation instrumentation; + private final FirehoseInstrumentation firehoseInstrumentation; private final JsonSchema schema; private final ObjectMapper objectMapper = new ObjectMapper(); private JsonFormat.Printer jsonPrinter; @@ -42,10 +42,10 @@ public class JsonFilter implements Filter { * Instantiates a new Json filter. 
* * @param filterConfig the consumer config - * @param instrumentation the instrumentation + * @param firehoseInstrumentation the instrumentation */ - public JsonFilter(StencilClient stencilClient, FilterConfig filterConfig, Instrumentation instrumentation) { - this.instrumentation = instrumentation; + public JsonFilter(StencilClient stencilClient, FilterConfig filterConfig, FirehoseInstrumentation firehoseInstrumentation) { + this.firehoseInstrumentation = firehoseInstrumentation; this.filterConfig = filterConfig; JsonSchemaFactory schemaFactory = JsonSchemaFactory.getInstance(SpecVersion.VersionFlag.V7); this.schema = schemaFactory.getSchema(filterConfig.getFilterJsonSchema()); @@ -80,12 +80,12 @@ public FilteredMessages filter(List messages) throws FilterException { private boolean evaluate(String jsonMessage) throws FilterException { try { JsonNode message = objectMapper.readTree(jsonMessage); - if (instrumentation.isDebugEnabled()) { - instrumentation.logDebug("Json Message: \n {}", message.toPrettyString()); + if (firehoseInstrumentation.isDebugEnabled()) { + firehoseInstrumentation.logDebug("Json Message: \n {}", message.toPrettyString()); } Set validationErrors = schema.validate(message); validationErrors.forEach(error -> { - instrumentation.logDebug("Message filtered out due to: {}", error.getMessage()); + firehoseInstrumentation.logDebug("Message filtered out due to: {}", error.getMessage()); }); return validationErrors.isEmpty(); } catch (JsonProcessingException e) { diff --git a/src/main/java/io/odpf/firehose/filter/json/JsonFilterUtil.java b/src/main/java/io/odpf/firehose/filter/json/JsonFilterUtil.java index 369408219..198bee9d5 100644 --- a/src/main/java/io/odpf/firehose/filter/json/JsonFilterUtil.java +++ b/src/main/java/io/odpf/firehose/filter/json/JsonFilterUtil.java @@ -1,7 +1,7 @@ package io.odpf.firehose.filter.json; import io.odpf.firehose.config.FilterConfig; -import io.odpf.firehose.metrics.Instrumentation; +import io.odpf.firehose.metrics.FirehoseInstrumentation; import lombok.experimental.UtilityClass; import static io.odpf.firehose.config.enums.FilterMessageFormatType.PROTOBUF; @@ -16,14 +16,14 @@ public class JsonFilterUtil { * Log configs. 
* * @param filterConfig the filter config - * @param instrumentation the instrumentation + * @param firehoseInstrumentation the instrumentation */ - public static void logConfigs(FilterConfig filterConfig, Instrumentation instrumentation) { - instrumentation.logInfo("\n\tFilter data source type: {}", filterConfig.getFilterDataSource()); - instrumentation.logInfo("\n\tFilter JSON Schema: {}", filterConfig.getFilterJsonSchema()); - instrumentation.logInfo("\n\tFilter ESB message format: {}", filterConfig.getFilterESBMessageFormat()); + public static void logConfigs(FilterConfig filterConfig, FirehoseInstrumentation firehoseInstrumentation) { + firehoseInstrumentation.logInfo("\n\tFilter data source type: {}", filterConfig.getFilterDataSource()); + firehoseInstrumentation.logInfo("\n\tFilter JSON Schema: {}", filterConfig.getFilterJsonSchema()); + firehoseInstrumentation.logInfo("\n\tFilter ESB message format: {}", filterConfig.getFilterESBMessageFormat()); if (filterConfig.getFilterESBMessageFormat() == PROTOBUF) { - instrumentation.logInfo("\n\tMessage Proto class: {}", filterConfig.getFilterSchemaProtoClass()); + firehoseInstrumentation.logInfo("\n\tMessage Proto class: {}", filterConfig.getFilterSchemaProtoClass()); } } @@ -31,19 +31,19 @@ public static void logConfigs(FilterConfig filterConfig, Instrumentation instrum * Validate configs. * * @param filterConfig the filter config - * @param instrumentation the instrumentation + * @param firehoseInstrumentation the instrumentation */ - public static void validateConfigs(FilterConfig filterConfig, Instrumentation instrumentation) { + public static void validateConfigs(FilterConfig filterConfig, FirehoseInstrumentation firehoseInstrumentation) { if (filterConfig.getFilterJsonSchema() == null) { - instrumentation.logError("Failed to create filter due to invalid config"); + firehoseInstrumentation.logError("Failed to create filter due to invalid config"); throw new IllegalArgumentException("Filter JSON Schema is invalid"); } if (filterConfig.getFilterESBMessageFormat() == null) { - instrumentation.logError("Failed to create filter due to invalid config"); + firehoseInstrumentation.logError("Failed to create filter due to invalid config"); throw new IllegalArgumentException("Filter ESB message type cannot be null"); } if (filterConfig.getFilterESBMessageFormat() == PROTOBUF && filterConfig.getFilterSchemaProtoClass() == null) { - instrumentation.logError("Failed to create filter due to invalid config"); + firehoseInstrumentation.logError("Failed to create filter due to invalid config"); throw new IllegalArgumentException("Proto Schema class cannot be null"); } } diff --git a/src/main/java/io/odpf/firehose/launch/Main.java b/src/main/java/io/odpf/firehose/launch/Main.java index d6ef115dc..541b5fe07 100644 --- a/src/main/java/io/odpf/firehose/launch/Main.java +++ b/src/main/java/io/odpf/firehose/launch/Main.java @@ -1,11 +1,13 @@ package io.odpf.firehose.launch; +import io.odpf.depot.config.MetricsConfig; +import io.odpf.depot.metrics.StatsDReporter; +import io.odpf.depot.metrics.StatsDReporterBuilder; import io.odpf.firehose.config.KafkaConsumerConfig; import io.odpf.firehose.consumer.FirehoseConsumer; import io.odpf.firehose.consumer.FirehoseConsumerFactory; -import io.odpf.firehose.metrics.Instrumentation; -import io.odpf.firehose.metrics.StatsDReporter; -import io.odpf.firehose.metrics.StatsDReporterFactory; +import io.odpf.firehose.metrics.FirehoseInstrumentation; +import io.odpf.firehose.metrics.Metrics; import 
org.aeonbits.owner.ConfigFactory; import java.io.IOException; @@ -27,17 +29,18 @@ public static void main(String[] args) throws InterruptedException { } private static void multiThreadedConsumers(KafkaConsumerConfig kafkaConsumerConfig) throws InterruptedException { - StatsDReporter statsDReporter = StatsDReporterFactory - .fromKafkaConsumerConfig(kafkaConsumerConfig) - .buildReporter(); - Instrumentation instrumentation = new Instrumentation(statsDReporter, Main.class); - instrumentation.logInfo("Number of consumer threads: " + kafkaConsumerConfig.getApplicationThreadCount()); - instrumentation.logInfo("Delay to clean up consumer threads in ms: " + kafkaConsumerConfig.getApplicationThreadCleanupDelay()); + MetricsConfig config = ConfigFactory.create(MetricsConfig.class, System.getenv()); + StatsDReporter statsDReporter = StatsDReporterBuilder.builder().withMetricConfig(config) + .withExtraTags(Metrics.tag(Metrics.CONSUMER_GROUP_ID_TAG, kafkaConsumerConfig.getSourceKafkaConsumerGroupId())) + .build(); + FirehoseInstrumentation firehoseInstrumentation = new FirehoseInstrumentation(statsDReporter, Main.class); + firehoseInstrumentation.logInfo("Number of consumer threads: " + kafkaConsumerConfig.getApplicationThreadCount()); + firehoseInstrumentation.logInfo("Delay to clean up consumer threads in ms: " + kafkaConsumerConfig.getApplicationThreadCleanupDelay()); Task consumerTask = new Task( kafkaConsumerConfig.getApplicationThreadCount(), kafkaConsumerConfig.getApplicationThreadCleanupDelay(), - new Instrumentation(statsDReporter, Task.class), + new FirehoseInstrumentation(statsDReporter, Task.class), taskFinished -> { FirehoseConsumer firehoseConsumer = null; @@ -45,36 +48,36 @@ private static void multiThreadedConsumers(KafkaConsumerConfig kafkaConsumerConf firehoseConsumer = new FirehoseConsumerFactory(kafkaConsumerConfig, statsDReporter).buildConsumer(); while (true) { if (Thread.interrupted()) { - instrumentation.logWarn("Consumer Thread interrupted, leaving the loop!"); + firehoseInstrumentation.logWarn("Consumer Thread interrupted, leaving the loop!"); break; } firehoseConsumer.process(); } } catch (Exception | Error e) { - instrumentation.captureFatalError(e, "Caught exception or error, exiting the application"); + firehoseInstrumentation.captureFatalError("firehose_error_event", e, "Caught exception or error, exiting the application"); System.exit(1); } finally { - ensureThreadInterruptStateIsClearedAndClose(firehoseConsumer, instrumentation); + ensureThreadInterruptStateIsClearedAndClose(firehoseConsumer, firehoseInstrumentation); taskFinished.run(); } }); - instrumentation.logInfo("Consumer Task Created"); + firehoseInstrumentation.logInfo("Consumer Task Created"); Runtime.getRuntime().addShutdownHook(new Thread(() -> { - instrumentation.logInfo("Program is going to exit. Have started execution of shutdownHook before this"); + firehoseInstrumentation.logInfo("Program is going to exit. 
Have started execution of shutdownHook before this"); consumerTask.stop(); })); consumerTask.run().waitForCompletion(); - instrumentation.logInfo("Exiting main thread"); + firehoseInstrumentation.logInfo("Exiting main thread"); } - private static void ensureThreadInterruptStateIsClearedAndClose(FirehoseConsumer firehoseConsumer, Instrumentation instrumentation) { + private static void ensureThreadInterruptStateIsClearedAndClose(FirehoseConsumer firehoseConsumer, FirehoseInstrumentation firehoseInstrumentation) { Thread.interrupted(); try { firehoseConsumer.close(); } catch (IOException e) { - instrumentation.captureFatalError(e, "Exception on closing firehose consumer"); + firehoseInstrumentation.captureFatalError("firehose_error_event", e, "Exception on closing firehose consumer"); } } } diff --git a/src/main/java/io/odpf/firehose/launch/Task.java b/src/main/java/io/odpf/firehose/launch/Task.java index c54b45a76..35e087e79 100644 --- a/src/main/java/io/odpf/firehose/launch/Task.java +++ b/src/main/java/io/odpf/firehose/launch/Task.java @@ -8,7 +8,7 @@ import java.util.concurrent.Future; import java.util.function.Consumer; -import io.odpf.firehose.metrics.Instrumentation; +import io.odpf.firehose.metrics.FirehoseInstrumentation; /** * The Task with parallelism. @@ -22,17 +22,17 @@ public class Task { private Runnable taskFinishCallback; private final CountDownLatch countDownLatch; private final List> fnFutures; - private Instrumentation instrumentation; + private FirehoseInstrumentation firehoseInstrumentation; /** * Instantiates a new Task. * - * @param parallelism the parallelism - * @param threadCleanupDelay the thread cleanup delay - * @param instrumentation the instrumentation - * @param task the task + * @param parallelism the parallelism + * @param threadCleanupDelay the thread cleanup delay + * @param firehoseInstrumentation the instrumentation + * @param task the task */ - public Task(int parallelism, int threadCleanupDelay, Instrumentation instrumentation, Consumer task) { + public Task(int parallelism, int threadCleanupDelay, FirehoseInstrumentation firehoseInstrumentation, Consumer task) { executorService = Executors.newFixedThreadPool(parallelism); this.parallelism = parallelism; this.threadCleanupDelay = threadCleanupDelay; @@ -40,7 +40,7 @@ public Task(int parallelism, int threadCleanupDelay, Instrumentation instrumenta this.countDownLatch = new CountDownLatch(parallelism); this.fnFutures = new ArrayList<>(parallelism); taskFinishCallback = countDownLatch::countDown; - this.instrumentation = instrumentation; + this.firehoseInstrumentation = firehoseInstrumentation; } public Task run() { @@ -53,18 +53,18 @@ public Task run() { } public void waitForCompletion() throws InterruptedException { - instrumentation.logInfo("waiting for completion"); + firehoseInstrumentation.logInfo("waiting for completion"); countDownLatch.await(); } public Task stop() { try { - instrumentation.logInfo("Stopping task thread"); + firehoseInstrumentation.logInfo("Stopping task thread"); fnFutures.forEach(consumerThread -> consumerThread.cancel(true)); - instrumentation.logInfo("Sleeping thread during clean up for {} duration", threadCleanupDelay); + firehoseInstrumentation.logInfo("Sleeping thread during clean up for {} duration", threadCleanupDelay); Thread.sleep(threadCleanupDelay); } catch (InterruptedException e) { - instrumentation.captureNonFatalError(e, "error stopping tasks"); + firehoseInstrumentation.captureNonFatalError("firehose_error_event", e, "error stopping tasks"); } return this; } 
diff --git a/src/main/java/io/odpf/firehose/message/FirehoseMessageUtils.java b/src/main/java/io/odpf/firehose/message/FirehoseMessageUtils.java new file mode 100644 index 000000000..b23f8ebe1 --- /dev/null +++ b/src/main/java/io/odpf/firehose/message/FirehoseMessageUtils.java @@ -0,0 +1,24 @@ +package io.odpf.firehose.message; + +import io.odpf.depot.common.Tuple; +import io.odpf.depot.message.OdpfMessage; + +import java.util.List; +import java.util.stream.Collectors; + +public class FirehoseMessageUtils { + + public static List convertToOdpfMessage(List messages) { + return messages.stream().map(message -> + new OdpfMessage( + message.getLogKey(), + message.getLogMessage(), + new Tuple<>("message_topic", message.getTopic()), + new Tuple<>("message_partition", message.getPartition()), + new Tuple<>("message_offset", message.getOffset()), + new Tuple<>("message_headers", message.getHeaders()), + new Tuple<>("message_timestamp", message.getTimestamp()), + new Tuple<>("load_time", message.getConsumeTimestamp()))) + .collect(Collectors.toList()); + } +} diff --git a/src/main/java/io/odpf/firehose/message/Message.java b/src/main/java/io/odpf/firehose/message/Message.java index 79cd0579f..131d93f4e 100644 --- a/src/main/java/io/odpf/firehose/message/Message.java +++ b/src/main/java/io/odpf/firehose/message/Message.java @@ -1,8 +1,8 @@ package io.odpf.firehose.message; -import io.odpf.firehose.error.ErrorInfo; -import io.odpf.firehose.error.ErrorType; +import io.odpf.depot.error.ErrorInfo; +import io.odpf.depot.error.ErrorType; import io.odpf.firehose.exception.DefaultException; import lombok.AllArgsConstructor; import lombok.EqualsAndHashCode; diff --git a/src/main/java/io/odpf/firehose/metrics/FirehoseInstrumentation.java b/src/main/java/io/odpf/firehose/metrics/FirehoseInstrumentation.java new file mode 100644 index 000000000..3652269a5 --- /dev/null +++ b/src/main/java/io/odpf/firehose/metrics/FirehoseInstrumentation.java @@ -0,0 +1,159 @@ +package io.odpf.firehose.metrics; + +import io.odpf.depot.error.ErrorType; +import io.odpf.depot.metrics.Instrumentation; +import io.odpf.depot.metrics.StatsDReporter; +import io.odpf.firehose.message.Message; +import org.slf4j.Logger; + +import java.io.IOException; +import java.time.Instant; +import java.util.List; + +import static io.odpf.firehose.metrics.Metrics.ERROR_MESSAGES_TOTAL; +import static io.odpf.firehose.metrics.Metrics.ERROR_TYPE_TAG; +import static io.odpf.firehose.metrics.Metrics.GLOBAL_MESSAGES_TOTAL; +import static io.odpf.firehose.metrics.Metrics.MESSAGE_SCOPE_TAG; +import static io.odpf.firehose.metrics.Metrics.MESSAGE_TYPE_TAG; +import static io.odpf.firehose.metrics.Metrics.MessageType; +import static io.odpf.firehose.metrics.Metrics.PIPELINE_END_LATENCY_MILLISECONDS; +import static io.odpf.firehose.metrics.Metrics.PIPELINE_EXECUTION_LIFETIME_MILLISECONDS; +import static io.odpf.firehose.metrics.Metrics.SINK_PUSH_BATCH_SIZE_TOTAL; +import static io.odpf.firehose.metrics.Metrics.SINK_RESPONSE_TIME_MILLISECONDS; +import static io.odpf.firehose.metrics.Metrics.SOURCE_KAFKA_MESSAGES_FILTER_TOTAL; +import static io.odpf.firehose.metrics.Metrics.SOURCE_KAFKA_PULL_BATCH_SIZE_TOTAL; + +/** + * Instrumentation. + *
<p>
+ * Handle logging and metric capturing. + */ +public class FirehoseInstrumentation extends Instrumentation { + + private Instant startExecutionTime; + + /** + * Instantiates a new Instrumentation. + * + * @param statsDReporter the stats d reporter + * @param logger the logger + */ + public FirehoseInstrumentation(StatsDReporter statsDReporter, Logger logger) { + super(statsDReporter, logger); + } + + /** + * Instantiates a new Instrumentation. + * + * @param statsDReporter the stats d reporter + * @param clazz the clazz + */ + public FirehoseInstrumentation(StatsDReporter statsDReporter, Class clazz) { + super(statsDReporter, clazz); + } + + /** + * Gets start execution time. + * + * @return the start execution time + */ + public Instant getStartExecutionTime() { + return startExecutionTime; + } + // =================== LOGGING =================== + + // ============== FILTER MESSAGES ============== + + /** + * Captures batch message histogram. + * + * @param pulledMessageCount the pulled message count + */ + public void capturePulledMessageHistogram(long pulledMessageCount) { + captureHistogram(SOURCE_KAFKA_PULL_BATCH_SIZE_TOTAL, pulledMessageCount); + } + + /** + * Captures filtered message count. + * + * @param filteredMessageCount the filtered message count + */ + public void captureFilteredMessageCount(long filteredMessageCount) { + captureCount(SOURCE_KAFKA_MESSAGES_FILTER_TOTAL, filteredMessageCount); + } + + + // ================ SinkExecutionTelemetry ================ + + public Instant startExecution() { + startExecutionTime = Instant.now(); + return startExecutionTime; + } + + /** + * Logs total messages executions. + * + * @param sinkType the sink type + * @param messageListSize the message list size + */ + public void captureSinkExecutionTelemetry(String sinkType, Integer messageListSize) { + logInfo("Processed {} messages in {}.", messageListSize, sinkType); + captureDurationSince(SINK_RESPONSE_TIME_MILLISECONDS, this.startExecutionTime); + } + + /** + * @param totalMessages total messages + */ + public void captureMessageBatchSize(long totalMessages) { + captureHistogram(SINK_PUSH_BATCH_SIZE_TOTAL, totalMessages); + } + + public void captureErrorMetrics(List errors) { + errors.forEach(this::captureErrorMetrics); + } + + public void captureErrorMetrics(ErrorType errorType) { + captureCount(ERROR_MESSAGES_TOTAL, 1L, String.format(ERROR_TYPE_TAG, errorType.name())); + } + + // =================== Retry and DLQ Telemetry ====================== + + public void captureMessageMetrics(String metric, MessageType type, ErrorType errorType, long counter) { + if (errorType != null) { + captureCount(metric, counter, String.format(MESSAGE_TYPE_TAG, type.name()), String.format(ERROR_TYPE_TAG, errorType.name())); + } else { + captureCount(metric, counter, String.format(MESSAGE_TYPE_TAG, type.name())); + } + } + + public void captureGlobalMessageMetrics(Metrics.MessageScope scope, long counter) { + captureCount(GLOBAL_MESSAGES_TOTAL, counter, String.format(MESSAGE_SCOPE_TAG, scope.name())); + } + + public void captureMessageMetrics(String metric, MessageType type, int counter) { + captureMessageMetrics(metric, type, null, counter); + } + + public void captureDLQErrors(Message message, Exception e) { + captureNonFatalError("firehose_error_event", e, "Unable to send record with key {} and message {} to DLQ", message.getLogKey(), message.getLogMessage()); + } + + // ===================== Latency / LifetimeTillSink ===================== + + public void capturePreExecutionLatencies(List 
messages) { + messages.forEach(message -> { + captureDurationSince(PIPELINE_END_LATENCY_MILLISECONDS, Instant.ofEpochMilli(message.getTimestamp())); + captureDurationSince(PIPELINE_EXECUTION_LIFETIME_MILLISECONDS, Instant.ofEpochMilli(message.getConsumeTimestamp())); + }); + } + + public void captureSleepTime(String metric, int sleepTime) { + captureValue(metric, sleepTime); + } + + // ===================== closing ================= + + public void close() throws IOException { + super.close(); + } +} diff --git a/src/main/java/io/odpf/firehose/metrics/Instrumentation.java b/src/main/java/io/odpf/firehose/metrics/Instrumentation.java deleted file mode 100644 index 19b24e5a2..000000000 --- a/src/main/java/io/odpf/firehose/metrics/Instrumentation.java +++ /dev/null @@ -1,259 +0,0 @@ -package io.odpf.firehose.metrics; - -import io.odpf.firehose.message.Message; -import io.odpf.firehose.error.ErrorType; -import org.slf4j.Logger; -import org.slf4j.LoggerFactory; - -import java.io.IOException; -import java.time.Instant; -import java.util.List; - -import static io.odpf.firehose.metrics.Metrics.ERROR_EVENT; -import static io.odpf.firehose.metrics.Metrics.ERROR_MESSAGES_TOTAL; -import static io.odpf.firehose.metrics.Metrics.ERROR_MESSAGE_CLASS_TAG; -import static io.odpf.firehose.metrics.Metrics.ERROR_TYPE_TAG; -import static io.odpf.firehose.metrics.Metrics.FATAL_ERROR; -import static io.odpf.firehose.metrics.Metrics.GLOBAL_MESSAGES_TOTAL; -import static io.odpf.firehose.metrics.Metrics.MESSAGE_SCOPE_TAG; -import static io.odpf.firehose.metrics.Metrics.MESSAGE_TYPE_TAG; -import static io.odpf.firehose.metrics.Metrics.MessageType; -import static io.odpf.firehose.metrics.Metrics.NON_FATAL_ERROR; -import static io.odpf.firehose.metrics.Metrics.PIPELINE_END_LATENCY_MILLISECONDS; -import static io.odpf.firehose.metrics.Metrics.PIPELINE_EXECUTION_LIFETIME_MILLISECONDS; -import static io.odpf.firehose.metrics.Metrics.SINK_PUSH_BATCH_SIZE_TOTAL; -import static io.odpf.firehose.metrics.Metrics.SINK_RESPONSE_TIME_MILLISECONDS; -import static io.odpf.firehose.metrics.Metrics.SOURCE_KAFKA_MESSAGES_FILTER_TOTAL; -import static io.odpf.firehose.metrics.Metrics.SOURCE_KAFKA_PULL_BATCH_SIZE_TOTAL; - -/** - * Instrumentation. - *
<p>
- * Handle logging and metric capturing. - */ -public class Instrumentation { - - private final StatsDReporter statsDReporter; - private final Logger logger; - private Instant startExecutionTime; - - /** - * Instantiates a new Instrumentation. - * - * @param statsDReporter the stats d reporter - * @param logger the logger - */ - public Instrumentation(StatsDReporter statsDReporter, Logger logger) { - this.statsDReporter = statsDReporter; - this.logger = logger; - } - - /** - * Instantiates a new Instrumentation. - * - * @param statsDReporter the stats d reporter - * @param clazz the clazz - */ - public Instrumentation(StatsDReporter statsDReporter, Class clazz) { - this.statsDReporter = statsDReporter; - this.logger = LoggerFactory.getLogger(clazz); - } - - /** - * Gets start execution time. - * - * @return the start execution time - */ - public Instant getStartExecutionTime() { - return startExecutionTime; - } - // =================== LOGGING =================== - - public void logInfo(String message) { - logger.info(message); - } - - public void logInfo(String template, Object... t) { - logger.info(template, t); - } - - public void logWarn(String template, Object... t) { - logger.warn(template, t); - } - - public void logDebug(String template, Object... t) { - logger.debug(template, t); - } - - public void logError(String template, Object... t) { - logger.error(template, t); - } - - public boolean isDebugEnabled() { - return logger.isDebugEnabled(); - } - // ============== FILTER MESSAGES ============== - - /** - * Captures batch message histogram. - * - * @param pulledMessageCount the pulled message count - */ - public void capturePulledMessageHistogram(long pulledMessageCount) { - statsDReporter.captureHistogram(SOURCE_KAFKA_PULL_BATCH_SIZE_TOTAL, pulledMessageCount); - } - - /** - * Captures filtered message count. - * - * @param filteredMessageCount the filtered message count - */ - public void captureFilteredMessageCount(int filteredMessageCount) { - statsDReporter.captureCount(SOURCE_KAFKA_MESSAGES_FILTER_TOTAL, filteredMessageCount); - } - - // =================== ERROR =================== - - public void captureNonFatalError(Exception e) { - logger.warn(e.getMessage(), e); - statsDReporter.recordEvent(ERROR_EVENT, NON_FATAL_ERROR, errorTag(e, NON_FATAL_ERROR)); - } - - public void captureNonFatalError(Exception e, String message) { - logger.warn(message); - captureNonFatalError(e); - } - - public void captureNonFatalError(Exception e, String template, Object... t) { - logger.warn(template, t); - captureNonFatalError(e); - } - - public void captureFatalError(Throwable e) { - logger.error(e.getMessage(), e); - statsDReporter.recordEvent(ERROR_EVENT, FATAL_ERROR, errorTag(e, FATAL_ERROR)); - } - - public void captureFatalError(Throwable e, String message) { - logger.error(message); - this.captureFatalError(e); - } - - public void captureFatalError(Exception e, String message) { - logger.error(message); - this.captureFatalError(e); - } - - public void captureFatalError(Exception e, String template, Object... t) { - logger.error(template, t); - this.captureFatalError(e); - } - - private String errorTag(Throwable e, String errorType) { - return ERROR_MESSAGE_CLASS_TAG + "=" + e.getClass().getName() + ",type=" + errorType; - } - - // ================ SinkExecutionTelemetry ================ - - public Instant startExecution() { - startExecutionTime = Instant.now(); - return startExecutionTime; - } - - /** - * Logs total messages executions. 
- * - * @param sinkType the sink type - * @param messageListSize the message list size - */ - public void captureSinkExecutionTelemetry(String sinkType, Integer messageListSize) { - logger.info("Processed {} messages in {}.", messageListSize, sinkType); - statsDReporter.captureDurationSince(SINK_RESPONSE_TIME_MILLISECONDS, this.startExecutionTime); - } - - /** - * @param totalMessages total messages - */ - public void captureMessageBatchSize(int totalMessages) { - statsDReporter.captureHistogramWithTags(SINK_PUSH_BATCH_SIZE_TOTAL, totalMessages); - } - - public void captureErrorMetrics(List errors) { - errors.forEach(this::captureErrorMetrics); - } - - public void captureErrorMetrics(ErrorType errorType) { - statsDReporter.captureCount(ERROR_MESSAGES_TOTAL, 1, String.format(ERROR_TYPE_TAG, errorType.name())); - } - - // =================== Retry and DLQ Telemetry ====================== - - public void captureMessageMetrics(String metric, MessageType type, ErrorType errorType, int counter) { - if (errorType != null) { - statsDReporter.captureCount(metric, counter, String.format(MESSAGE_TYPE_TAG, type.name()), String.format(ERROR_TYPE_TAG, errorType.name())); - } else { - statsDReporter.captureCount(metric, counter, String.format(MESSAGE_TYPE_TAG, type.name())); - } - } - - public void captureGlobalMessageMetrics(Metrics.MessageScope scope, int counter) { - statsDReporter.captureCount(GLOBAL_MESSAGES_TOTAL, counter, String.format(MESSAGE_SCOPE_TAG, scope.name())); - } - - public void captureMessageMetrics(String metric, MessageType type, int counter) { - captureMessageMetrics(metric, type, null, counter); - } - - public void captureDLQErrors(Message message, Exception e) { - captureNonFatalError(e, "Unable to send record with key {} and message {} to DLQ", message.getLogKey(), message.getLogMessage()); - } - - // ===================== Latency / LifetimeTillSink ===================== - - public void capturePreExecutionLatencies(List messages) { - messages.forEach(message -> { - statsDReporter.captureDurationSince(PIPELINE_END_LATENCY_MILLISECONDS, Instant.ofEpochMilli(message.getTimestamp())); - statsDReporter.captureDurationSince(PIPELINE_EXECUTION_LIFETIME_MILLISECONDS, Instant.ofEpochMilli(message.getConsumeTimestamp())); - }); - } - - public void captureDurationSince(String metric, Instant instant, String... tags) { - statsDReporter.captureDurationSince(metric, instant, tags); - } - - public void captureDuration(String metric, long duration, String... tags) { - statsDReporter.captureDuration(metric, duration, tags); - } - - public void captureSleepTime(String metric, int sleepTime) { - statsDReporter.gauge(metric, sleepTime); - } - - // ===================== CountTelemetry ================= - - public void captureCount(String metric, Integer count, String... tags) { - statsDReporter.captureCount(metric, count, tags); - } - - public void captureCount(String metric, Long count, String... tags) { - statsDReporter.captureCount(metric, count, tags); - } - - public void incrementCounter(String metric, String... tags) { - statsDReporter.increment(metric, tags); - } - - public void incrementCounter(String metric) { - statsDReporter.increment(metric); - } - - public void captureValue(String metric, Integer value, String... 
tags) { - statsDReporter.gauge(metric, value, tags); - } - - // ===================== closing ================= - - public void close() throws IOException { - statsDReporter.close(); - } -} diff --git a/src/main/java/io/odpf/firehose/metrics/StatsDReporter.java b/src/main/java/io/odpf/firehose/metrics/StatsDReporter.java deleted file mode 100644 index 43bb5e7f2..000000000 --- a/src/main/java/io/odpf/firehose/metrics/StatsDReporter.java +++ /dev/null @@ -1,91 +0,0 @@ -package io.odpf.firehose.metrics; - -import com.timgroup.statsd.StatsDClient; -import org.slf4j.Logger; -import org.slf4j.LoggerFactory; - -import java.io.Closeable; -import java.io.IOException; -import java.time.Duration; -import java.time.Instant; -import java.util.stream.Collectors; -import java.util.stream.Stream; - -/** - * Statsd reporter for firehose. - */ -public class StatsDReporter implements Closeable { - - private final StatsDClient client; - private final String globalTags; - private static final Logger LOGGER = LoggerFactory.getLogger(StatsDReporter.class); - - public StatsDReporter(StatsDClient client, String... globalTags) { - this.client = client; - this.globalTags = String.join(",", globalTags).replaceAll(":", "="); - } - - public StatsDClient getClient() { - return client; - } - - public void captureCount(String metric, Integer delta, String... tags) { - client.count(withTags(metric, tags), delta); - } - - public void captureCount(String metric, Long delta, String... tags) { - client.count(withTags(metric, tags), delta); - } - - public void captureCount(String metric, Integer delta) { - client.count(withGlobalTags(metric), delta); - } - - public void captureHistogramWithTags(String metric, long delta, String... tags) { - client.time(withTags(metric, tags), delta); - } - - public void captureHistogram(String metric, long delta) { - client.time(withGlobalTags(metric), delta); - } - - public void captureDurationSince(String metric, Instant startTime, String... tags) { - client.recordExecutionTime(withTags(metric, tags), Duration.between(startTime, Instant.now()).toMillis()); - } - - public void captureDuration(String metric, long duration, String... tags) { - client.recordExecutionTime(withTags(metric, tags), duration); - } - - public void gauge(String metric, Integer value, String... tags) { - client.gauge(withTags(metric, tags), value); - } - - public void increment(String metric, String... tags) { - captureCount(metric, 1, tags); - } - - public void increment(String metric) { - captureCount(metric, 1); - } - - public void recordEvent(String metric, String eventName, String... tags) { - client.recordSetValue(withTags(metric, tags), eventName); - } - - private String withGlobalTags(String metric) { - return metric + "," + this.globalTags; - } - - private String withTags(String metric, String... 
tags) { - return Stream.concat(Stream.of(withGlobalTags(metric)), Stream.of(tags)) - .collect(Collectors.joining(",")); - } - - @Override - public void close() throws IOException { - LOGGER.info("StatsD connection closed"); - client.stop(); - } - -} diff --git a/src/main/java/io/odpf/firehose/metrics/StatsDReporterFactory.java b/src/main/java/io/odpf/firehose/metrics/StatsDReporterFactory.java deleted file mode 100644 index 84bec68b6..000000000 --- a/src/main/java/io/odpf/firehose/metrics/StatsDReporterFactory.java +++ /dev/null @@ -1,61 +0,0 @@ -package io.odpf.firehose.metrics; - -import com.timgroup.statsd.NoOpStatsDClient; -import com.timgroup.statsd.NonBlockingStatsDClientBuilder; -import com.timgroup.statsd.StatsDClient; -import io.odpf.firehose.config.KafkaConsumerConfig; -import org.slf4j.Logger; -import org.slf4j.LoggerFactory; - -/** - * StatsDReporterFactory - *
<p>
- * Create statsDReporter Instance. - */ -public class StatsDReporterFactory { - - private final String statsDHost; - private final Integer statsDPort; - private final String[] globalTags; - private static final Logger LOGGER = LoggerFactory.getLogger(StatsDReporterFactory.class); - - public StatsDReporterFactory(String statsDHost, Integer statsDPort, String[] globalTags) { - this.statsDHost = statsDHost; - this.statsDPort = statsDPort; - this.globalTags = globalTags; - LOGGER.debug("\n\tStatsd Host: {}\n\tStatsd Port: {}\n\tStatsd Tags: {}", this.statsDHost, this.statsDPort, this.globalTags); - } - - private static T[] append(T[] arr, T lastElement) { - final int length = arr.length; - arr = java.util.Arrays.copyOf(arr, length + 1); - arr[length] = lastElement; - return arr; - } - - public static StatsDReporterFactory fromKafkaConsumerConfig(KafkaConsumerConfig kafkaConsumerConfig) { - return new StatsDReporterFactory( - kafkaConsumerConfig.getMetricStatsDHost(), - kafkaConsumerConfig.getMetricStatsDPort(), - append(kafkaConsumerConfig.getMetricStatsDTags().split(","), - Metrics.tag(Metrics.CONSUMER_GROUP_ID_TAG, kafkaConsumerConfig.getSourceKafkaConsumerGroupId()))); - } - - public StatsDReporter buildReporter() { - StatsDClient statsDClient = buildStatsDClient(); - return new StatsDReporter(statsDClient, globalTags); - } - - private StatsDClient buildStatsDClient() { - StatsDClient statsDClient; - try { - statsDClient = new NonBlockingStatsDClientBuilder().hostname(statsDHost).port(statsDPort).build(); - LOGGER.info("NonBlocking StatsD client connection established"); - } catch (Exception e) { - LOGGER.warn("Exception on creating StatsD client, disabling StatsD and Audit client", e); - LOGGER.warn("Firehose is running without collecting any metrics!!!!!!!!"); - statsDClient = new NoOpStatsDClient(); - } - return statsDClient; - } -} diff --git a/src/main/java/io/odpf/firehose/serializer/MessageToTemplatizedJson.java b/src/main/java/io/odpf/firehose/serializer/MessageToTemplatizedJson.java index 56cadf78b..189064a42 100644 --- a/src/main/java/io/odpf/firehose/serializer/MessageToTemplatizedJson.java +++ b/src/main/java/io/odpf/firehose/serializer/MessageToTemplatizedJson.java @@ -4,7 +4,7 @@ import io.odpf.firehose.message.Message; import io.odpf.firehose.exception.DeserializerException; import io.odpf.firehose.exception.ConfigurationException; -import io.odpf.firehose.metrics.Instrumentation; +import io.odpf.firehose.metrics.FirehoseInstrumentation; import com.google.gson.Gson; import com.google.protobuf.DynamicMessage; import com.google.protobuf.InvalidProtocolBufferException; @@ -32,10 +32,10 @@ public class MessageToTemplatizedJson implements MessageSerializer { private Parser protoParser; private HashSet pathsToReplace; private JSONParser jsonParser; - private Instrumentation instrumentation; + private FirehoseInstrumentation firehoseInstrumentation; - public static MessageToTemplatizedJson create(Instrumentation instrumentation, String httpSinkJsonBodyTemplate, Parser protoParser) { - MessageToTemplatizedJson messageToTemplatizedJson = new MessageToTemplatizedJson(instrumentation, httpSinkJsonBodyTemplate, protoParser); + public static MessageToTemplatizedJson create(FirehoseInstrumentation firehoseInstrumentation, String httpSinkJsonBodyTemplate, Parser protoParser) { + MessageToTemplatizedJson messageToTemplatizedJson = new MessageToTemplatizedJson(firehoseInstrumentation, httpSinkJsonBodyTemplate, protoParser); if (messageToTemplatizedJson.isInvalidJson()) { throw new 
ConfigurationException("Given HTTPSink JSON body template :" + httpSinkJsonBodyTemplate @@ -45,12 +45,12 @@ public static MessageToTemplatizedJson create(Instrumentation instrumentation, S return messageToTemplatizedJson; } - public MessageToTemplatizedJson(Instrumentation instrumentation, String httpSinkJsonBodyTemplate, Parser protoParser) { + public MessageToTemplatizedJson(FirehoseInstrumentation firehoseInstrumentation, String httpSinkJsonBodyTemplate, Parser protoParser) { this.httpSinkJsonBodyTemplate = httpSinkJsonBodyTemplate; this.protoParser = protoParser; this.jsonParser = new JSONParser(); this.gson = new Gson(); - this.instrumentation = instrumentation; + this.firehoseInstrumentation = firehoseInstrumentation; } private void setPathsFromTemplate() { @@ -61,7 +61,7 @@ private void setPathsFromTemplate() { paths.add(matcher.group(0)); } List pathList = new ArrayList<>(paths); - instrumentation.logDebug("\nPaths: {}", pathList); + firehoseInstrumentation.logDebug("\nPaths: {}", pathList); this.pathsToReplace = paths; } diff --git a/src/main/java/io/odpf/firehose/sink/AbstractSink.java b/src/main/java/io/odpf/firehose/sink/AbstractSink.java index b9da57028..e97bafa38 100644 --- a/src/main/java/io/odpf/firehose/sink/AbstractSink.java +++ b/src/main/java/io/odpf/firehose/sink/AbstractSink.java @@ -4,7 +4,7 @@ import io.odpf.firehose.exception.DeserializerException; import io.odpf.firehose.exception.ConfigurationException; import io.odpf.firehose.exception.SinkException; -import io.odpf.firehose.metrics.Instrumentation; +import io.odpf.firehose.metrics.FirehoseInstrumentation; import io.odpf.firehose.metrics.Metrics; import lombok.AllArgsConstructor; @@ -23,7 +23,7 @@ @AllArgsConstructor public abstract class AbstractSink implements Closeable, Sink { - private final Instrumentation instrumentation; + private final FirehoseInstrumentation firehoseInstrumentation; private final String sinkType; /** @@ -37,29 +37,29 @@ public List pushMessage(List messages) { List failedMessages = messages; Instant executionStartTime = null; try { - instrumentation.logInfo("Preparing {} messages", messages.size()); - instrumentation.captureMessageBatchSize(messages.size()); - instrumentation.captureMessageMetrics(Metrics.SINK_MESSAGES_TOTAL, Metrics.MessageType.TOTAL, messages.size()); + firehoseInstrumentation.logInfo("Preparing {} messages", messages.size()); + firehoseInstrumentation.captureMessageBatchSize(messages.size()); + firehoseInstrumentation.captureMessageMetrics(Metrics.SINK_MESSAGES_TOTAL, Metrics.MessageType.TOTAL, messages.size()); prepare(messages); - instrumentation.capturePreExecutionLatencies(messages); - executionStartTime = instrumentation.startExecution(); + firehoseInstrumentation.capturePreExecutionLatencies(messages); + executionStartTime = firehoseInstrumentation.startExecution(); failedMessages = execute(); - instrumentation.logInfo("Pushed {} messages", messages.size() - failedMessages.size()); + firehoseInstrumentation.logInfo("Pushed {} messages", messages.size() - failedMessages.size()); } catch (DeserializerException | ConfigurationException | NullPointerException | SinkException e) { throw e; } catch (Exception e) { if (!messages.isEmpty()) { - instrumentation.logWarn("Failed to push {} messages to sink", messages.size()); + firehoseInstrumentation.logWarn("Failed to push {} messages to sink", messages.size()); } - instrumentation.captureNonFatalError(e, "caught {} {}", e.getClass(), e.getMessage()); + 
firehoseInstrumentation.captureNonFatalError("firehose_error_event", e, "caught {} {}", e.getClass(), e.getMessage()); failedMessages = messages; } finally { // Process success,failure and error metrics if (executionStartTime != null) { - instrumentation.captureSinkExecutionTelemetry(sinkType, messages.size()); + firehoseInstrumentation.captureSinkExecutionTelemetry(sinkType, messages.size()); } - instrumentation.captureMessageMetrics(Metrics.SINK_MESSAGES_TOTAL, Metrics.MessageType.SUCCESS, messages.size() - failedMessages.size()); - instrumentation.captureGlobalMessageMetrics(Metrics.MessageScope.SINK, messages.size() - failedMessages.size()); + firehoseInstrumentation.captureMessageMetrics(Metrics.SINK_MESSAGES_TOTAL, Metrics.MessageType.SUCCESS, messages.size() - failedMessages.size()); + firehoseInstrumentation.captureGlobalMessageMetrics(Metrics.MessageScope.SINK, messages.size() - failedMessages.size()); processFailedMessages(failedMessages); } return failedMessages; @@ -67,12 +67,12 @@ public List pushMessage(List messages) { private void processFailedMessages(List failedMessages) { if (failedMessages.size() > 0) { - instrumentation.logError("Failed to Push {} messages to sink ", failedMessages.size()); + firehoseInstrumentation.logError("Failed to Push {} messages to sink ", failedMessages.size()); failedMessages.forEach(m -> { m.setDefaultErrorIfNotPresent(); - instrumentation.captureMessageMetrics(SINK_MESSAGES_TOTAL, Metrics.MessageType.FAILURE, m.getErrorInfo().getErrorType(), 1); - instrumentation.captureErrorMetrics(m.getErrorInfo().getErrorType()); - instrumentation.logError("Failed to Push message. Error: {},Topic: {}, Partition: {},Offset: {}", + firehoseInstrumentation.captureMessageMetrics(SINK_MESSAGES_TOTAL, Metrics.MessageType.FAILURE, m.getErrorInfo().getErrorType(), 1); + firehoseInstrumentation.captureErrorMetrics(m.getErrorInfo().getErrorType()); + firehoseInstrumentation.logError("Failed to Push message. 
Error: {},Topic: {}, Partition: {},Offset: {}", m.getErrorInfo().getErrorType(), m.getTopic(), m.getPartition(), @@ -86,8 +86,8 @@ private void processFailedMessages(List failedMessages) { * * @return the instrumentation */ - public Instrumentation getInstrumentation() { - return instrumentation; + public FirehoseInstrumentation getFirehoseInstrumentation() { + return firehoseInstrumentation; } /** diff --git a/src/main/java/io/odpf/firehose/sink/GenericOdpfSink.java b/src/main/java/io/odpf/firehose/sink/GenericOdpfSink.java new file mode 100644 index 000000000..0bc698650 --- /dev/null +++ b/src/main/java/io/odpf/firehose/sink/GenericOdpfSink.java @@ -0,0 +1,48 @@ +package io.odpf.firehose.sink; + +import io.odpf.depot.OdpfSink; +import io.odpf.depot.OdpfSinkResponse; +import io.odpf.depot.message.OdpfMessage; +import io.odpf.firehose.exception.DeserializerException; +import io.odpf.firehose.message.FirehoseMessageUtils; +import io.odpf.firehose.message.Message; +import io.odpf.firehose.metrics.FirehoseInstrumentation; + +import java.io.IOException; +import java.sql.SQLException; +import java.util.ArrayList; +import java.util.List; +import java.util.stream.Collectors; + +public class GenericOdpfSink extends AbstractSink { + private final List messageList = new ArrayList<>(); + private final OdpfSink odpfSink; + + public GenericOdpfSink(FirehoseInstrumentation firehoseInstrumentation, String sinkType, OdpfSink odpfSink) { + super(firehoseInstrumentation, sinkType); + this.odpfSink = odpfSink; + } + + @Override + protected List execute() throws Exception { + List odpfMessages = FirehoseMessageUtils.convertToOdpfMessage(messageList); + OdpfSinkResponse response = odpfSink.pushToSink(odpfMessages); + return response.getErrors().keySet().stream() + .map(index -> { + Message message = messageList.get(index.intValue()); + message.setErrorInfo(response.getErrorsFor(index)); + return message; + }).collect(Collectors.toList()); + } + + @Override + protected void prepare(List messages) throws DeserializerException, IOException, SQLException { + messageList.clear(); + messageList.addAll(messages); + } + + @Override + public void close() throws IOException { + + } +} diff --git a/src/main/java/io/odpf/firehose/sink/SinkFactory.java b/src/main/java/io/odpf/firehose/sink/SinkFactory.java index c2fc0240c..fa68edda8 100644 --- a/src/main/java/io/odpf/firehose/sink/SinkFactory.java +++ b/src/main/java/io/odpf/firehose/sink/SinkFactory.java @@ -1,18 +1,22 @@ package io.odpf.firehose.sink; +import io.odpf.depot.bigquery.BigQuerySink; +import io.odpf.depot.bigquery.BigQuerySinkFactory; +import io.odpf.depot.log.LogSink; +import io.odpf.depot.log.LogSinkFactory; +import io.odpf.depot.metrics.StatsDReporter; import io.odpf.firehose.config.KafkaConsumerConfig; +import io.odpf.firehose.config.enums.SinkType; import io.odpf.firehose.consumer.kafka.OffsetManager; import io.odpf.firehose.exception.ConfigurationException; -import io.odpf.firehose.metrics.Instrumentation; -import io.odpf.firehose.metrics.StatsDReporter; -import io.odpf.firehose.sink.bigquery.BigQuerySinkFactory; +import io.odpf.firehose.metrics.FirehoseInstrumentation; +import io.odpf.firehose.sink.bigquery.BigquerySinkUtils; import io.odpf.firehose.sink.blob.BlobSinkFactory; import io.odpf.firehose.sink.elasticsearch.EsSinkFactory; import io.odpf.firehose.sink.grpc.GrpcSinkFactory; import io.odpf.firehose.sink.http.HttpSinkFactory; import io.odpf.firehose.sink.influxdb.InfluxSinkFactory; import io.odpf.firehose.sink.jdbc.JdbcSinkFactory; -import 
io.odpf.firehose.sink.log.LogSinkFactory; import io.odpf.firehose.sink.mongodb.MongoSinkFactory; import io.odpf.firehose.sink.prometheus.PromSinkFactory; import io.odpf.firehose.sink.redis.RedisSinkFactory; @@ -23,21 +27,23 @@ public class SinkFactory { private final KafkaConsumerConfig kafkaConsumerConfig; private final StatsDReporter statsDReporter; - private final Instrumentation instrumentation; + private final FirehoseInstrumentation firehoseInstrumentation; private final StencilClient stencilClient; private final OffsetManager offsetManager; private BigQuerySinkFactory bigQuerySinkFactory; - private final Map config = System.getenv(); + private LogSinkFactory logSinkFactory; + private final Map config; public SinkFactory(KafkaConsumerConfig kafkaConsumerConfig, StatsDReporter statsDReporter, StencilClient stencilClient, OffsetManager offsetManager) { - instrumentation = new Instrumentation(statsDReporter, SinkFactory.class); + firehoseInstrumentation = new FirehoseInstrumentation(statsDReporter, SinkFactory.class); this.kafkaConsumerConfig = kafkaConsumerConfig; this.statsDReporter = statsDReporter; this.stencilClient = stencilClient; this.offsetManager = offsetManager; + this.config = SinkFactoryUtils.addAdditionalConfigsForSinkConnectors(System.getenv()); } /** @@ -48,7 +54,6 @@ public void init() { case JDBC: case HTTP: case INFLUXDB: - case LOG: case ELASTICSEARCH: case REDIS: case GRPC: @@ -56,8 +61,13 @@ public void init() { case BLOB: case MONGODB: return; + case LOG: + logSinkFactory = new LogSinkFactory(config, statsDReporter); + logSinkFactory.init(); + return; case BIGQUERY: - bigQuerySinkFactory = new BigQuerySinkFactory(config, statsDReporter); + BigquerySinkUtils.addMetadataColumns(config); + bigQuerySinkFactory = new BigQuerySinkFactory(config, statsDReporter, BigquerySinkUtils.getRowIDCreator()); bigQuerySinkFactory.init(); return; default: @@ -66,8 +76,9 @@ public void init() { } public Sink getSink() { - instrumentation.logInfo("Sink Type: {}", kafkaConsumerConfig.getSinkType().toString()); - switch (kafkaConsumerConfig.getSinkType()) { + SinkType sinkType = kafkaConsumerConfig.getSinkType(); + firehoseInstrumentation.logInfo("Sink Type: {}", sinkType); + switch (sinkType) { case JDBC: return JdbcSinkFactory.create(config, statsDReporter, stencilClient); case HTTP: @@ -75,7 +86,7 @@ public Sink getSink() { case INFLUXDB: return InfluxSinkFactory.create(config, statsDReporter, stencilClient); case LOG: - return LogSinkFactory.create(config, statsDReporter, stencilClient); + return new GenericOdpfSink(new FirehoseInstrumentation(statsDReporter, LogSink.class), sinkType.name(), logSinkFactory.create()); case ELASTICSEARCH: return EsSinkFactory.create(config, statsDReporter, stencilClient); case REDIS: @@ -87,7 +98,7 @@ public Sink getSink() { case BLOB: return BlobSinkFactory.create(config, offsetManager, statsDReporter, stencilClient); case BIGQUERY: - return bigQuerySinkFactory.create(); + return new GenericOdpfSink(new FirehoseInstrumentation(statsDReporter, BigQuerySink.class), sinkType.name(), bigQuerySinkFactory.create()); case MONGODB: return MongoSinkFactory.create(config, statsDReporter, stencilClient); default: diff --git a/src/main/java/io/odpf/firehose/sink/SinkFactoryUtils.java b/src/main/java/io/odpf/firehose/sink/SinkFactoryUtils.java new file mode 100644 index 000000000..f5a80ce7c --- /dev/null +++ b/src/main/java/io/odpf/firehose/sink/SinkFactoryUtils.java @@ -0,0 +1,19 @@ +package io.odpf.firehose.sink; + +import 
io.odpf.depot.message.SinkConnectorSchemaMessageMode; + +import java.util.HashMap; +import java.util.Map; + +public class SinkFactoryUtils { + protected static Map addAdditionalConfigsForSinkConnectors(Map env) { + Map finalConfig = new HashMap<>(env); + finalConfig.put("SINK_CONNECTOR_SCHEMA_MESSAGE_CLASS", env.getOrDefault("INPUT_SCHEMA_PROTO_CLASS", "")); + finalConfig.put("SINK_CONNECTOR_SCHEMA_KEY_CLASS", env.getOrDefault("INPUT_SCHEMA_PROTO_CLASS", "")); + finalConfig.put("SINK_METRICS_APPLICATION_PREFIX", "firehose_"); + finalConfig.put("SINK_CONNECTOR_SCHEMA_PROTO_ALLOW_UNKNOWN_FIELDS_ENABLE", env.getOrDefault("INPUT_SCHEMA_PROTO_ALLOW_UNKNOWN_FIELDS_ENABLE", "false")); + finalConfig.put("SINK_CONNECTOR_SCHEMA_MESSAGE_MODE", + env.getOrDefault("KAFKA_RECORD_PARSER_MODE", "").equals("key") ? SinkConnectorSchemaMessageMode.LOG_KEY.name() : SinkConnectorSchemaMessageMode.LOG_MESSAGE.name()); + return finalConfig; + } +} diff --git a/src/main/java/io/odpf/firehose/sink/bigquery/BigQuerySink.java b/src/main/java/io/odpf/firehose/sink/bigquery/BigQuerySink.java deleted file mode 100644 index f968c99d9..000000000 --- a/src/main/java/io/odpf/firehose/sink/bigquery/BigQuerySink.java +++ /dev/null @@ -1,74 +0,0 @@ -package io.odpf.firehose.sink.bigquery; - -import com.google.cloud.bigquery.InsertAllRequest; -import com.google.cloud.bigquery.InsertAllResponse; -import io.odpf.firehose.message.Message; -import io.odpf.firehose.exception.DeserializerException; -import io.odpf.firehose.metrics.Instrumentation; -import io.odpf.firehose.sink.AbstractSink; -import io.odpf.firehose.sink.bigquery.converter.MessageRecordConverterCache; -import io.odpf.firehose.sink.bigquery.handler.BigQueryClient; -import io.odpf.firehose.sink.bigquery.handler.BigQueryResponseParser; -import io.odpf.firehose.sink.bigquery.handler.BigQueryRow; -import io.odpf.firehose.sink.bigquery.models.Record; -import io.odpf.firehose.sink.bigquery.models.Records; - -import java.io.IOException; -import java.sql.SQLException; -import java.time.Instant; -import java.util.List; -import java.util.stream.Collectors; - -public class BigQuerySink extends AbstractSink { - - private final BigQueryClient bigQueryClient; - private final BigQueryRow rowCreator; - private final Instrumentation instrumentation; - private final MessageRecordConverterCache converterCache; - private List messageList; - - public BigQuerySink(Instrumentation instrumentation, - String sinkType, - BigQueryClient client, - MessageRecordConverterCache converterCache, - BigQueryRow rowCreator) { - super(instrumentation, sinkType); - this.instrumentation = instrumentation; - this.bigQueryClient = client; - this.converterCache = converterCache; - this.rowCreator = rowCreator; - } - - @Override - protected List execute() throws Exception { - Instant now = Instant.now(); - Records records = converterCache.getMessageRecordConverter().convert(messageList, now); - List invalidMessages = records.getInvalidRecords().stream().map(Record::getMessage).collect(Collectors.toList()); - if (records.getValidRecords().size() > 0) { - InsertAllResponse response = insertIntoBQ(records.getValidRecords()); - if (response.hasErrors()) { - invalidMessages.addAll(BigQueryResponseParser.parseResponse(records.getValidRecords(), response, instrumentation)); - } - } - return invalidMessages; - } - - @Override - protected void prepare(List messages) throws DeserializerException, IOException, SQLException { - this.messageList = messages; - } - - @Override - public void close() throws IOException { - - } 
- - private InsertAllResponse insertIntoBQ(List records) { - InsertAllRequest.Builder builder = InsertAllRequest.newBuilder(bigQueryClient.getTableID()); - records.forEach((Record m) -> builder.addRow(rowCreator.of(m))); - InsertAllRequest rows = builder.build(); - InsertAllResponse response = bigQueryClient.insertAll(rows); - instrumentation.logInfo("Pushed a batch of {} records to BQ. Insert success?: {}", records.size(), !response.hasErrors()); - return response; - } -} diff --git a/src/main/java/io/odpf/firehose/sink/bigquery/BigQuerySinkFactory.java b/src/main/java/io/odpf/firehose/sink/bigquery/BigQuerySinkFactory.java deleted file mode 100644 index 720f7446c..000000000 --- a/src/main/java/io/odpf/firehose/sink/bigquery/BigQuerySinkFactory.java +++ /dev/null @@ -1,71 +0,0 @@ -package io.odpf.firehose.sink.bigquery; - -import io.odpf.firehose.config.BigQuerySinkConfig; -import io.odpf.firehose.config.enums.SinkType; -import io.odpf.firehose.metrics.Instrumentation; -import io.odpf.firehose.metrics.StatsDReporter; -import io.odpf.firehose.sink.Sink; -import io.odpf.firehose.sink.bigquery.converter.MessageRecordConverterCache; -import io.odpf.firehose.sink.bigquery.handler.BigQueryClient; -import io.odpf.firehose.sink.bigquery.handler.BigQueryRow; -import io.odpf.firehose.sink.bigquery.handler.BigQueryRowWithInsertId; -import io.odpf.firehose.sink.bigquery.handler.BigQueryRowWithoutInsertId; -import io.odpf.firehose.sink.bigquery.proto.ProtoUpdateListener; -import io.odpf.firehose.utils.StencilUtils; -import io.odpf.stencil.StencilClientFactory; -import io.odpf.stencil.client.StencilClient; -import io.odpf.stencil.Parser; -import io.odpf.stencil.config.StencilConfig; -import org.aeonbits.owner.ConfigFactory; - -import java.io.IOException; -import java.util.Map; - -public class BigQuerySinkFactory { - - private BigQueryClient bigQueryClient; - private MessageRecordConverterCache recordConverterWrapper; - private BigQueryRow rowCreator; - private final StatsDReporter statsDReporter; - private final Map config; - - public BigQuerySinkFactory(Map env, StatsDReporter statsDReporter) { - this.config = env; - this.statsDReporter = statsDReporter; - } - - public void init() { - BigQuerySinkConfig sinkConfig = ConfigFactory.create(BigQuerySinkConfig.class, config); - try { - this.bigQueryClient = new BigQueryClient(sinkConfig, new Instrumentation(statsDReporter, BigQueryClient.class)); - this.recordConverterWrapper = new MessageRecordConverterCache(); - StencilClient stencilClient; - ProtoUpdateListener protoUpdateListener = new ProtoUpdateListener(sinkConfig, bigQueryClient, recordConverterWrapper); - StencilConfig stencilConfig = StencilUtils.getStencilConfig(sinkConfig, statsDReporter.getClient(), protoUpdateListener); - if (sinkConfig.isSchemaRegistryStencilEnable()) { - stencilClient = StencilClientFactory.getClient(sinkConfig.getSchemaRegistryStencilUrls(), stencilConfig); - } else { - stencilClient = StencilClientFactory.getClient(); - } - Parser parser = stencilClient.getParser(sinkConfig.getInputSchemaProtoClass()); - protoUpdateListener.setStencilParser(parser); - protoUpdateListener.onSchemaUpdate(stencilClient.getAll()); - if (sinkConfig.isRowInsertIdEnabled()) { - this.rowCreator = new BigQueryRowWithInsertId(); - } else { - this.rowCreator = new BigQueryRowWithoutInsertId(); - } - } catch (IOException e) { - throw new IllegalArgumentException("Exception occurred while creating sink", e); - } - } - - public Sink create() { - return new BigQuerySink( - new 
Instrumentation(statsDReporter, BigQuerySink.class), - SinkType.BIGQUERY.name(), - bigQueryClient, - recordConverterWrapper, - rowCreator); - } -} diff --git a/src/main/java/io/odpf/firehose/sink/bigquery/BigquerySinkUtils.java b/src/main/java/io/odpf/firehose/sink/bigquery/BigquerySinkUtils.java new file mode 100644 index 000000000..6e6b2f441 --- /dev/null +++ b/src/main/java/io/odpf/firehose/sink/bigquery/BigquerySinkUtils.java @@ -0,0 +1,15 @@ +package io.odpf.firehose.sink.bigquery; + +import java.util.Map; +import java.util.function.Function; + +public class BigquerySinkUtils { + public static Function, String> getRowIDCreator() { + return (m -> String.format("%s_%d_%d", m.get("message_topic"), m.get("message_partition"), m.get("message_offset"))); + } + + public static void addMetadataColumns(Map config) { + config.put("SINK_BIGQUERY_METADATA_COLUMNS_TYPES", + "message_offset=integer,message_topic=string,load_time=timestamp,message_timestamp=timestamp,message_partition=integer"); + } +} diff --git a/src/main/java/io/odpf/firehose/sink/bigquery/converter/MessageRecordConverter.java b/src/main/java/io/odpf/firehose/sink/bigquery/converter/MessageRecordConverter.java deleted file mode 100644 index f4c92477e..000000000 --- a/src/main/java/io/odpf/firehose/sink/bigquery/converter/MessageRecordConverter.java +++ /dev/null @@ -1,94 +0,0 @@ -package io.odpf.firehose.sink.bigquery.converter; - -import com.google.api.client.util.DateTime; -import com.google.protobuf.DynamicMessage; -import com.google.protobuf.InvalidProtocolBufferException; -import io.odpf.firehose.config.BigQuerySinkConfig; -import io.odpf.firehose.message.Message; -import io.odpf.firehose.error.ErrorInfo; -import io.odpf.firehose.error.ErrorType; -import io.odpf.firehose.exception.DeserializerException; -import io.odpf.firehose.exception.EmptyMessageException; -import io.odpf.firehose.exception.UnknownFieldsException; -import io.odpf.firehose.sink.bigquery.models.Constants; -import io.odpf.firehose.sink.bigquery.models.Record; -import io.odpf.firehose.sink.bigquery.models.Records; -import io.odpf.firehose.sink.bigquery.proto.UnknownProtoFields; -import io.odpf.firehose.proto.ProtoUtils; -import io.odpf.stencil.Parser; -import lombok.AllArgsConstructor; -import lombok.extern.slf4j.Slf4j; - -import java.time.Instant; -import java.util.ArrayList; -import java.util.Collections; -import java.util.Date; -import java.util.HashMap; -import java.util.List; -import java.util.Map; - -@AllArgsConstructor -@Slf4j -public class MessageRecordConverter { - private final RowMapper rowMapper; - private final Parser parser; - private final BigQuerySinkConfig config; - - public Records convert(List messages, Instant now) { - ArrayList validRecords = new ArrayList<>(); - ArrayList invalidRecords = new ArrayList<>(); - for (Message message : messages) { - try { - Record record = createRecord(now, message); - validRecords.add(record); - } catch (UnknownFieldsException e) { - message.setErrorInfo(new ErrorInfo(e, ErrorType.UNKNOWN_FIELDS_ERROR)); - invalidRecords.add(new Record(message, Collections.emptyMap())); - } catch (EmptyMessageException e) { - message.setErrorInfo(new ErrorInfo(e, ErrorType.INVALID_MESSAGE_ERROR)); - invalidRecords.add(new Record(message, Collections.emptyMap())); - } catch (DeserializerException e) { - message.setErrorInfo(new ErrorInfo(e, ErrorType.DESERIALIZATION_ERROR)); - invalidRecords.add(new Record(message, Collections.emptyMap())); - } - } - return new Records(validRecords, invalidRecords); - } - - private Record 
createRecord(Instant now, Message message) throws DeserializerException { - if (message.getLogMessage() == null || message.getLogMessage().length == 0) { - log.info("empty message found at offset: {}, partition: {}", message.getOffset(), message.getPartition()); - throw new EmptyMessageException(); - } - - try { - DynamicMessage dynamicMessage = parser.parse(message.getLogMessage()); - if (!config.getInputSchemaProtoAllowUnknownFieldsEnable() && ProtoUtils.hasUnknownField(dynamicMessage)) { - log.info("unknown fields found at offset: {}, partition: {}, message: {}", message.getOffset(), message.getPartition(), message); - throw new UnknownFieldsException(dynamicMessage); - } - Map columns = rowMapper.map(dynamicMessage); - addMetadata(columns, message, now); - return new Record(message, columns); - } catch (InvalidProtocolBufferException e) { - log.info("failed to deserialize message: {} at offset: {}, partition: {}", UnknownProtoFields.toString(message.getLogMessage()), - message.getOffset(), message.getPartition()); - throw new DeserializerException("failed to deserialize ", e); - } - } - - private void addMetadata(Map columns, Message message, Instant now) { - Map offsetMetadata = new HashMap<>(); - offsetMetadata.put(Constants.PARTITION_COLUMN_NAME, message.getPartition()); - offsetMetadata.put(Constants.OFFSET_COLUMN_NAME, message.getOffset()); - offsetMetadata.put(Constants.TOPIC_COLUMN_NAME, message.getTopic()); - offsetMetadata.put(Constants.TIMESTAMP_COLUMN_NAME, new DateTime(message.getTimestamp())); - offsetMetadata.put(Constants.LOAD_TIME_COLUMN_NAME, new DateTime(Date.from(now))); - - if (config.getBqMetadataNamespace().isEmpty()) { - columns.putAll(offsetMetadata); - } else { - columns.put(config.getBqMetadataNamespace(), offsetMetadata); - } - } -} diff --git a/src/main/java/io/odpf/firehose/sink/bigquery/converter/MessageRecordConverterCache.java b/src/main/java/io/odpf/firehose/sink/bigquery/converter/MessageRecordConverterCache.java deleted file mode 100644 index 0f2f56c85..000000000 --- a/src/main/java/io/odpf/firehose/sink/bigquery/converter/MessageRecordConverterCache.java +++ /dev/null @@ -1,8 +0,0 @@ -package io.odpf.firehose.sink.bigquery.converter; - -import lombok.Data; - -@Data -public class MessageRecordConverterCache { - private MessageRecordConverter messageRecordConverter; -} diff --git a/src/main/java/io/odpf/firehose/sink/bigquery/converter/ProtoFieldFactory.java b/src/main/java/io/odpf/firehose/sink/bigquery/converter/ProtoFieldFactory.java deleted file mode 100644 index 2401718dc..000000000 --- a/src/main/java/io/odpf/firehose/sink/bigquery/converter/ProtoFieldFactory.java +++ /dev/null @@ -1,33 +0,0 @@ -package io.odpf.firehose.sink.bigquery.converter; - -import com.google.protobuf.Descriptors; -import io.odpf.firehose.sink.bigquery.converter.fields.ByteProtoField; -import io.odpf.firehose.sink.bigquery.converter.fields.DefaultProtoField; -import io.odpf.firehose.sink.bigquery.converter.fields.EnumProtoField; -import io.odpf.firehose.sink.bigquery.converter.fields.NestedProtoField; -import io.odpf.firehose.sink.bigquery.converter.fields.ProtoField; -import io.odpf.firehose.sink.bigquery.converter.fields.StructProtoField; -import io.odpf.firehose.sink.bigquery.converter.fields.TimestampProtoField; - -import java.util.Arrays; -import java.util.List; -import java.util.Optional; - -public class ProtoFieldFactory { - - public static ProtoField getField(Descriptors.FieldDescriptor descriptor, Object fieldValue) { - List protoFields = Arrays.asList( - new 
TimestampProtoField(descriptor, fieldValue), - new EnumProtoField(descriptor, fieldValue), - new ByteProtoField(descriptor, fieldValue), - new StructProtoField(descriptor, fieldValue), - new NestedProtoField(descriptor, fieldValue) - ); - Optional first = protoFields - .stream() - .filter(ProtoField::matches) - .findFirst(); - return first.orElseGet(() -> new DefaultProtoField(fieldValue)); - } - -} diff --git a/src/main/java/io/odpf/firehose/sink/bigquery/converter/RowMapper.java b/src/main/java/io/odpf/firehose/sink/bigquery/converter/RowMapper.java deleted file mode 100644 index 6a5d1faf4..000000000 --- a/src/main/java/io/odpf/firehose/sink/bigquery/converter/RowMapper.java +++ /dev/null @@ -1,93 +0,0 @@ -package io.odpf.firehose.sink.bigquery.converter; - -import com.google.protobuf.Descriptors; -import com.google.protobuf.DynamicMessage; -import io.odpf.firehose.sink.bigquery.converter.fields.NestedProtoField; -import io.odpf.firehose.sink.bigquery.converter.fields.ProtoField; -import io.odpf.firehose.sink.bigquery.exception.ConfigurationException; -import io.odpf.firehose.sink.bigquery.models.Constants; -import lombok.AllArgsConstructor; -import lombok.extern.slf4j.Slf4j; - -import java.util.ArrayList; -import java.util.HashMap; -import java.util.List; -import java.util.Map; -import java.util.Properties; - -@Slf4j -@AllArgsConstructor -public class RowMapper { - - private final Properties mapping; - - public Map map(DynamicMessage message) { - if (mapping == null) { - throw new ConfigurationException("BQ_PROTO_COLUMN_MAPPING is not configured"); - } - return getMappings(message, mapping); - } - - private Map getMappings(DynamicMessage message, Properties columnMapping) { - if (message == null || columnMapping == null || columnMapping.isEmpty()) { - return new HashMap<>(); - } - Descriptors.Descriptor descriptorForType = message.getDescriptorForType(); - - Map row = new HashMap<>(columnMapping.size()); - columnMapping.forEach((key, value) -> { - String columnName = value.toString(); - String columnIndex = key.toString(); - if (columnIndex.equals(Constants.Config.RECORD_NAME)) { - return; - } - int protoIndex = Integer.parseInt(columnIndex); - Descriptors.FieldDescriptor fieldDesc = descriptorForType.findFieldByNumber(protoIndex); - if (fieldDesc != null && !message.getField(fieldDesc).toString().isEmpty()) { - Object field = message.getField(fieldDesc); - ProtoField protoField = ProtoFieldFactory.getField(fieldDesc, field); - Object fieldValue = protoField.getValue(); - - if (fieldValue instanceof List) { - addRepeatedFields(row, value, (List) fieldValue); - return; - } - - if (protoField.getClass().getName().equals(NestedProtoField.class.getName())) { - try { - columnName = getNestedColumnName((Properties) value); - fieldValue = getMappings((DynamicMessage) field, (Properties) value); - } catch (Exception e) { - log.error("Exception::Handling nested field failure: {}", e.getMessage()); - throw e; - } - } - row.put(columnName, fieldValue); - } - }); - return row; - } - - private String getNestedColumnName(Properties value) { - return value.get(Constants.Config.RECORD_NAME).toString(); - } - - private void addRepeatedFields(Map row, Object value, List fieldValue) { - if (fieldValue.isEmpty()) { - return; - } - List repeatedNestedFields = new ArrayList<>(); - String columnName = null; - for (Object f : fieldValue) { - if (f instanceof DynamicMessage) { - Properties nestedMappings = (Properties) value; - repeatedNestedFields.add(getMappings((DynamicMessage) f, nestedMappings)); - 
columnName = getNestedColumnName(nestedMappings); - } else { - repeatedNestedFields.add(f); - columnName = (String) value; - } - } - row.put(columnName, repeatedNestedFields); - } -} diff --git a/src/main/java/io/odpf/firehose/sink/bigquery/converter/fields/ByteProtoField.java b/src/main/java/io/odpf/firehose/sink/bigquery/converter/fields/ByteProtoField.java deleted file mode 100644 index 6b3e95814..000000000 --- a/src/main/java/io/odpf/firehose/sink/bigquery/converter/fields/ByteProtoField.java +++ /dev/null @@ -1,31 +0,0 @@ -package io.odpf.firehose.sink.bigquery.converter.fields; - - -import com.google.protobuf.ByteString; -import com.google.protobuf.Descriptors; -import lombok.AllArgsConstructor; - -import java.util.Base64; - -@AllArgsConstructor -public class ByteProtoField implements ProtoField { - - private final Descriptors.FieldDescriptor descriptor; - private final Object fieldValue; - - @Override - public Object getValue() { - ByteString byteString = (ByteString) fieldValue; - byte[] bytes = byteString.toStringUtf8().getBytes(); - return base64Encode(bytes); - } - - private String base64Encode(byte[] bytes) { - return new String(Base64.getEncoder().encode(bytes)); - } - - @Override - public boolean matches() { - return descriptor.getType() == Descriptors.FieldDescriptor.Type.BYTES; - } -} diff --git a/src/main/java/io/odpf/firehose/sink/bigquery/converter/fields/DefaultProtoField.java b/src/main/java/io/odpf/firehose/sink/bigquery/converter/fields/DefaultProtoField.java deleted file mode 100644 index 9268a993d..000000000 --- a/src/main/java/io/odpf/firehose/sink/bigquery/converter/fields/DefaultProtoField.java +++ /dev/null @@ -1,18 +0,0 @@ -package io.odpf.firehose.sink.bigquery.converter.fields; - -import lombok.AllArgsConstructor; - -@AllArgsConstructor -public class DefaultProtoField implements ProtoField { - private final Object fieldValue; - - @Override - public Object getValue() { - return fieldValue; - } - - @Override - public boolean matches() { - return false; - } -} diff --git a/src/main/java/io/odpf/firehose/sink/bigquery/converter/fields/EnumProtoField.java b/src/main/java/io/odpf/firehose/sink/bigquery/converter/fields/EnumProtoField.java deleted file mode 100644 index 7df6a61ce..000000000 --- a/src/main/java/io/odpf/firehose/sink/bigquery/converter/fields/EnumProtoField.java +++ /dev/null @@ -1,31 +0,0 @@ -package io.odpf.firehose.sink.bigquery.converter.fields; - -import com.google.protobuf.Descriptors; -import lombok.AllArgsConstructor; - -import java.util.ArrayList; -import java.util.List; - -@AllArgsConstructor -public class EnumProtoField implements ProtoField { - private final Descriptors.FieldDescriptor descriptor; - private final Object fieldValue; - - @Override - public Object getValue() { - if (descriptor.isRepeated()) { - List enumValues = ((List) (fieldValue)); - List enumStrValues = new ArrayList<>(); - for (Descriptors.EnumValueDescriptor enumVal : enumValues) { - enumStrValues.add(enumVal.toString()); - } - return enumStrValues; - } - return fieldValue.toString(); - } - - @Override - public boolean matches() { - return descriptor.getType() == Descriptors.FieldDescriptor.Type.ENUM; - } -} diff --git a/src/main/java/io/odpf/firehose/sink/bigquery/converter/fields/NestedProtoField.java b/src/main/java/io/odpf/firehose/sink/bigquery/converter/fields/NestedProtoField.java deleted file mode 100644 index f7fa5d5a7..000000000 --- a/src/main/java/io/odpf/firehose/sink/bigquery/converter/fields/NestedProtoField.java +++ /dev/null @@ -1,23 +0,0 @@ -package 
io.odpf.firehose.sink.bigquery.converter.fields; - -import com.google.protobuf.Descriptors; -import com.google.protobuf.DynamicMessage; -import lombok.AllArgsConstructor; - -import java.util.List; - -@AllArgsConstructor -public class NestedProtoField implements ProtoField { - private final Descriptors.FieldDescriptor descriptor; - private final Object fieldValue; - - @Override - public DynamicMessage getValue() { - return (DynamicMessage) fieldValue; - } - - @Override - public boolean matches() { - return descriptor.getType() == Descriptors.FieldDescriptor.Type.MESSAGE && !(fieldValue instanceof List); - } -} diff --git a/src/main/java/io/odpf/firehose/sink/bigquery/converter/fields/ProtoField.java b/src/main/java/io/odpf/firehose/sink/bigquery/converter/fields/ProtoField.java deleted file mode 100644 index 5d5e59035..000000000 --- a/src/main/java/io/odpf/firehose/sink/bigquery/converter/fields/ProtoField.java +++ /dev/null @@ -1,8 +0,0 @@ -package io.odpf.firehose.sink.bigquery.converter.fields; - -public interface ProtoField { - - Object getValue(); - - boolean matches(); -} diff --git a/src/main/java/io/odpf/firehose/sink/bigquery/converter/fields/StructProtoField.java b/src/main/java/io/odpf/firehose/sink/bigquery/converter/fields/StructProtoField.java deleted file mode 100644 index 11cd33335..000000000 --- a/src/main/java/io/odpf/firehose/sink/bigquery/converter/fields/StructProtoField.java +++ /dev/null @@ -1,45 +0,0 @@ -package io.odpf.firehose.sink.bigquery.converter.fields; - -import com.google.protobuf.Descriptors; -import com.google.protobuf.DynamicMessage; -import com.google.protobuf.InvalidProtocolBufferException; -import com.google.protobuf.util.JsonFormat; -import lombok.AllArgsConstructor; - -import java.util.ArrayList; -import java.util.Collection; -import java.util.List; - -@AllArgsConstructor -public class StructProtoField implements ProtoField { - private final Descriptors.FieldDescriptor descriptor; - private final Object fieldValue; - - @Override - public Object getValue() { - try { - if (fieldValue instanceof Collection) { - List structStrValues = new ArrayList<>(); - for (Object field : (Collection) fieldValue) { - structStrValues.add(getString(field)); - } - return structStrValues; - } - return getString(fieldValue); - } catch (InvalidProtocolBufferException e) { - return ""; - } - } - - private String getString(Object field) throws InvalidProtocolBufferException { - return JsonFormat.printer() - .omittingInsignificantWhitespace() - .print((DynamicMessage) field); - } - - @Override - public boolean matches() { - return descriptor.getType() == Descriptors.FieldDescriptor.Type.MESSAGE - && descriptor.getMessageType().getFullName().equals(com.google.protobuf.Struct.getDescriptor().getFullName()); - } -} diff --git a/src/main/java/io/odpf/firehose/sink/bigquery/converter/fields/TimestampProtoField.java b/src/main/java/io/odpf/firehose/sink/bigquery/converter/fields/TimestampProtoField.java deleted file mode 100644 index 0acc79623..000000000 --- a/src/main/java/io/odpf/firehose/sink/bigquery/converter/fields/TimestampProtoField.java +++ /dev/null @@ -1,45 +0,0 @@ -package io.odpf.firehose.sink.bigquery.converter.fields; - -import com.google.api.client.util.DateTime; -import com.google.protobuf.Descriptors; -import com.google.protobuf.DynamicMessage; -import lombok.AllArgsConstructor; - -import java.time.Instant; -import java.util.ArrayList; -import java.util.Collection; -import java.util.List; - -@AllArgsConstructor -public class TimestampProtoField implements 
ProtoField { - private final Descriptors.FieldDescriptor descriptor; - private final Object fieldValue; - - @Override - public Object getValue() { - if (fieldValue instanceof Collection) { - List tsValues = new ArrayList<>(); - for (Object field : (Collection) fieldValue) { - tsValues.add(getTime(field)); - } - return tsValues; - } - - return getTime(fieldValue); - } - - private DateTime getTime(Object field) { - DynamicMessage dynamicField = (DynamicMessage) field; - List descriptors = dynamicField.getDescriptorForType().getFields(); - List timeFields = new ArrayList<>(); - descriptors.forEach(desc -> timeFields.add(dynamicField.getField(desc))); - Instant time = Instant.ofEpochSecond((long) timeFields.get(0), ((Integer) timeFields.get(1)).longValue()); - return new DateTime(time.toEpochMilli()); - } - - @Override - public boolean matches() { - return descriptor.getType() == Descriptors.FieldDescriptor.Type.MESSAGE - && descriptor.getMessageType().getFullName().equals(com.google.protobuf.Timestamp.getDescriptor().getFullName()); - } -} diff --git a/src/main/java/io/odpf/firehose/sink/bigquery/error/ErrorDescriptor.java b/src/main/java/io/odpf/firehose/sink/bigquery/error/ErrorDescriptor.java deleted file mode 100644 index 328155c5b..000000000 --- a/src/main/java/io/odpf/firehose/sink/bigquery/error/ErrorDescriptor.java +++ /dev/null @@ -1,15 +0,0 @@ -package io.odpf.firehose.sink.bigquery.error; - - -/** - * Descriptor interface that defines the various error descriptors and the corresponding error types. - */ -public interface ErrorDescriptor { - - /** - * If the implementing descriptor matches the condition as prescribed in the concrete implementation. - * - * @return - true if the condition matches, false otherwise. - */ - boolean matches(); -} diff --git a/src/main/java/io/odpf/firehose/sink/bigquery/error/ErrorParser.java b/src/main/java/io/odpf/firehose/sink/bigquery/error/ErrorParser.java deleted file mode 100644 index 581127eb8..000000000 --- a/src/main/java/io/odpf/firehose/sink/bigquery/error/ErrorParser.java +++ /dev/null @@ -1,37 +0,0 @@ -package io.odpf.firehose.sink.bigquery.error; - - -import com.google.cloud.bigquery.BigQueryError; - -import java.util.Arrays; -import java.util.List; -import java.util.stream.Collectors; - -/** - * ErrorParser determines the {@link ErrorDescriptor} classes error based on the - * error string supplied. 
- */ -public class ErrorParser { - - public static ErrorDescriptor getError(String reasonText, String msgText) { - List errDescList = Arrays.asList( - new InvalidSchemaError(reasonText, msgText), - new OOBError(reasonText, msgText), - new StoppedError(reasonText)); - - ErrorDescriptor errorDescriptor = errDescList - .stream() - .filter(ErrorDescriptor::matches) - .findFirst() - .orElse(new UnknownError()); - - return errorDescriptor; - } - - public static List parseError(List bqErrors) { - return bqErrors.stream() - .map(err -> getError(err.getReason(), err.getMessage())) - .collect(Collectors.toList()); - } - -} diff --git a/src/main/java/io/odpf/firehose/sink/bigquery/error/InvalidSchemaError.java b/src/main/java/io/odpf/firehose/sink/bigquery/error/InvalidSchemaError.java deleted file mode 100644 index ec5a2e8ac..000000000 --- a/src/main/java/io/odpf/firehose/sink/bigquery/error/InvalidSchemaError.java +++ /dev/null @@ -1,27 +0,0 @@ -package io.odpf.firehose.sink.bigquery.error; - -import lombok.AllArgsConstructor; - -@AllArgsConstructor -/** - * This error returns when there is any kind of invalid input - * other than an invalid query, such as missing required fields - * or an invalid table schema. - * - * https://cloud.google.com/bigquery/docs/error-messages - * */ -public class InvalidSchemaError implements ErrorDescriptor { - - private final String reason; - private final String message; - - @Override - public boolean matches() { - return reason.equals("invalid") && message.contains("no such field"); - } - - @Override - public String toString() { - return "InvalidSchemaError"; - } -} diff --git a/src/main/java/io/odpf/firehose/sink/bigquery/error/OOBError.java b/src/main/java/io/odpf/firehose/sink/bigquery/error/OOBError.java deleted file mode 100644 index 70849925e..000000000 --- a/src/main/java/io/odpf/firehose/sink/bigquery/error/OOBError.java +++ /dev/null @@ -1,26 +0,0 @@ -package io.odpf.firehose.sink.bigquery.error; - -import lombok.AllArgsConstructor; - -@AllArgsConstructor -/** - * Out of bounds are caused when the partitioned column has a date value less than - * 5 years and more than 1 year in future - * */ -public class OOBError implements ErrorDescriptor { - - private final String reason; - private final String message; - - @Override - public boolean matches() { - return reason.equals("invalid") - && ((message.contains("is outside the allowed bounds") && message.contains("days in the past and") && message.contains("days in the future")) - || message.contains("out of range")); - } - @Override - public String toString() { - return "OOBError"; - } - -} diff --git a/src/main/java/io/odpf/firehose/sink/bigquery/error/StoppedError.java b/src/main/java/io/odpf/firehose/sink/bigquery/error/StoppedError.java deleted file mode 100644 index b4c661255..000000000 --- a/src/main/java/io/odpf/firehose/sink/bigquery/error/StoppedError.java +++ /dev/null @@ -1,27 +0,0 @@ -package io.odpf.firehose.sink.bigquery.error; - -import lombok.AllArgsConstructor; - -@AllArgsConstructor -/** - * stopped 200 This status code returns when a job is canceled. - * This will be returned if a batch of insertion has some bad records - * which caused the job to be cancelled. 
Bad records will have some *other* error - * but rest of records will be marked as "stopped" and can be sent as is - * - * https://cloud.google.com/bigquery/docs/error-messages - * */ -public class StoppedError implements ErrorDescriptor { - - private final String reason; - - @Override - public boolean matches() { - return reason.equals("stopped"); - } - - @Override - public String toString() { - return "Stopped"; - } -} diff --git a/src/main/java/io/odpf/firehose/sink/bigquery/error/UnknownError.java b/src/main/java/io/odpf/firehose/sink/bigquery/error/UnknownError.java deleted file mode 100644 index d11d6eab7..000000000 --- a/src/main/java/io/odpf/firehose/sink/bigquery/error/UnknownError.java +++ /dev/null @@ -1,21 +0,0 @@ -package io.odpf.firehose.sink.bigquery.error; - -import lombok.AllArgsConstructor; - -@AllArgsConstructor -/** - * UnknownError is used when error factory failed to match any possible - * known errors - * */ -public class UnknownError implements ErrorDescriptor { - - @Override - public boolean matches() { - return false; - } - - @Override - public String toString() { - return "UnknownError"; - } -} diff --git a/src/main/java/io/odpf/firehose/sink/bigquery/exception/BQDatasetLocationChangedException.java b/src/main/java/io/odpf/firehose/sink/bigquery/exception/BQDatasetLocationChangedException.java deleted file mode 100644 index d0f325cd9..000000000 --- a/src/main/java/io/odpf/firehose/sink/bigquery/exception/BQDatasetLocationChangedException.java +++ /dev/null @@ -1,8 +0,0 @@ -package io.odpf.firehose.sink.bigquery.exception; - -public class BQDatasetLocationChangedException extends RuntimeException { - public BQDatasetLocationChangedException(String message) { - super(message); - } -} - diff --git a/src/main/java/io/odpf/firehose/sink/bigquery/exception/BQPartitionKeyNotSpecified.java b/src/main/java/io/odpf/firehose/sink/bigquery/exception/BQPartitionKeyNotSpecified.java deleted file mode 100644 index 467f1c75d..000000000 --- a/src/main/java/io/odpf/firehose/sink/bigquery/exception/BQPartitionKeyNotSpecified.java +++ /dev/null @@ -1,7 +0,0 @@ -package io.odpf.firehose.sink.bigquery.exception; - -public class BQPartitionKeyNotSpecified extends RuntimeException { - public BQPartitionKeyNotSpecified(String message) { - super(message); - } -} diff --git a/src/main/java/io/odpf/firehose/sink/bigquery/exception/BQSchemaMappingException.java b/src/main/java/io/odpf/firehose/sink/bigquery/exception/BQSchemaMappingException.java deleted file mode 100644 index 1b295fad5..000000000 --- a/src/main/java/io/odpf/firehose/sink/bigquery/exception/BQSchemaMappingException.java +++ /dev/null @@ -1,7 +0,0 @@ -package io.odpf.firehose.sink.bigquery.exception; - -public class BQSchemaMappingException extends RuntimeException { - public BQSchemaMappingException(String message) { - super(message); - } -} diff --git a/src/main/java/io/odpf/firehose/sink/bigquery/exception/BQTableUpdateFailure.java b/src/main/java/io/odpf/firehose/sink/bigquery/exception/BQTableUpdateFailure.java deleted file mode 100644 index 01519015e..000000000 --- a/src/main/java/io/odpf/firehose/sink/bigquery/exception/BQTableUpdateFailure.java +++ /dev/null @@ -1,7 +0,0 @@ -package io.odpf.firehose.sink.bigquery.exception; - -public class BQTableUpdateFailure extends RuntimeException { - public BQTableUpdateFailure(String message, Throwable rootCause) { - super(message, rootCause); - } -} diff --git a/src/main/java/io/odpf/firehose/sink/bigquery/exception/BigQuerySinkException.java 
b/src/main/java/io/odpf/firehose/sink/bigquery/exception/BigQuerySinkException.java deleted file mode 100644 index 5f01eed33..000000000 --- a/src/main/java/io/odpf/firehose/sink/bigquery/exception/BigQuerySinkException.java +++ /dev/null @@ -1,7 +0,0 @@ -package io.odpf.firehose.sink.bigquery.exception; - -import lombok.EqualsAndHashCode; - -@EqualsAndHashCode(callSuper = false) -public class BigQuerySinkException extends RuntimeException { -} diff --git a/src/main/java/io/odpf/firehose/sink/bigquery/exception/ConfigurationException.java b/src/main/java/io/odpf/firehose/sink/bigquery/exception/ConfigurationException.java deleted file mode 100644 index c73ee8f98..000000000 --- a/src/main/java/io/odpf/firehose/sink/bigquery/exception/ConfigurationException.java +++ /dev/null @@ -1,7 +0,0 @@ -package io.odpf.firehose.sink.bigquery.exception; - -public class ConfigurationException extends RuntimeException { - public ConfigurationException(String message) { - super(message); - } -} diff --git a/src/main/java/io/odpf/firehose/sink/bigquery/exception/ProtoNotFoundException.java b/src/main/java/io/odpf/firehose/sink/bigquery/exception/ProtoNotFoundException.java deleted file mode 100644 index b467ad7c8..000000000 --- a/src/main/java/io/odpf/firehose/sink/bigquery/exception/ProtoNotFoundException.java +++ /dev/null @@ -1,7 +0,0 @@ -package io.odpf.firehose.sink.bigquery.exception; - -public class ProtoNotFoundException extends RuntimeException { - public ProtoNotFoundException(String message) { - super(message); - } -} diff --git a/src/main/java/io/odpf/firehose/sink/bigquery/handler/BQTableDefinition.java b/src/main/java/io/odpf/firehose/sink/bigquery/handler/BQTableDefinition.java deleted file mode 100644 index 648f751ad..000000000 --- a/src/main/java/io/odpf/firehose/sink/bigquery/handler/BQTableDefinition.java +++ /dev/null @@ -1,64 +0,0 @@ -package io.odpf.firehose.sink.bigquery.handler; - -import com.google.cloud.bigquery.Field; -import com.google.cloud.bigquery.LegacySQLTypeName; -import com.google.cloud.bigquery.Schema; -import com.google.cloud.bigquery.StandardTableDefinition; -import com.google.cloud.bigquery.TimePartitioning; -import io.odpf.firehose.config.BigQuerySinkConfig; -import io.odpf.firehose.sink.bigquery.exception.BQPartitionKeyNotSpecified; -import lombok.AllArgsConstructor; - -import java.util.Optional; - -@AllArgsConstructor -public class BQTableDefinition { - private final BigQuerySinkConfig bqConfig; - - public StandardTableDefinition getTableDefinition(Schema schema) { - StandardTableDefinition tableDefinition = StandardTableDefinition.newBuilder() - .setSchema(schema) - .build(); - if (!bqConfig.isTablePartitioningEnabled()) { - return tableDefinition; - } - return getPartitionedTableDefinition(schema); - } - - private StandardTableDefinition getPartitionedTableDefinition(Schema schema) { - StandardTableDefinition.Builder tableDefinition = StandardTableDefinition.newBuilder(); - Optional partitionFieldOptional = schema.getFields().stream().filter(obj -> obj.getName().equals(bqConfig.getTablePartitionKey())).findFirst(); - if (!partitionFieldOptional.isPresent()) { - throw new BQPartitionKeyNotSpecified(String.format("Partition key %s is not present in the schema", bqConfig.getTablePartitionKey())); - } - - Field partitionField = partitionFieldOptional.get(); - if (isTimePartitionedField(partitionField)) { - return createTimePartitionBuilder(tableDefinition) - .setSchema(schema) - .build(); - } else { - throw new UnsupportedOperationException("Range Bigquery partitioning 
is not supported, supported paritition fields have to be of DATE or TIMESTAMP type"); - } - } - - private StandardTableDefinition.Builder createTimePartitionBuilder(StandardTableDefinition.Builder tableBuilder) { - TimePartitioning.Builder timePartitioningBuilder = TimePartitioning.newBuilder(TimePartitioning.Type.DAY); - if (bqConfig.getTablePartitionKey() == null) { - throw new BQPartitionKeyNotSpecified(String.format("Partition key not specified for the table: %s", bqConfig.getTableName())); - } - timePartitioningBuilder.setField(bqConfig.getTablePartitionKey()) - .setRequirePartitionFilter(true); - - Long neverExpireMillis = null; - Long partitionExpiry = bqConfig.getBigQueryTablePartitionExpiryMS() > 0 ? bqConfig.getBigQueryTablePartitionExpiryMS() : neverExpireMillis; - timePartitioningBuilder.setExpirationMs(partitionExpiry); - - return tableBuilder - .setTimePartitioning(timePartitioningBuilder.build()); - } - - private boolean isTimePartitionedField(Field partitionField) { - return partitionField.getType() == LegacySQLTypeName.TIMESTAMP || partitionField.getType() == LegacySQLTypeName.DATE; - } -} diff --git a/src/main/java/io/odpf/firehose/sink/bigquery/handler/BigQueryClient.java b/src/main/java/io/odpf/firehose/sink/bigquery/handler/BigQueryClient.java deleted file mode 100644 index 94aa800df..000000000 --- a/src/main/java/io/odpf/firehose/sink/bigquery/handler/BigQueryClient.java +++ /dev/null @@ -1,195 +0,0 @@ -package io.odpf.firehose.sink.bigquery.handler; - -import com.google.auth.oauth2.GoogleCredentials; -import com.google.cloud.TransportOptions; -import com.google.cloud.bigquery.BigQuery; -import com.google.cloud.bigquery.BigQueryException; -import com.google.cloud.bigquery.BigQueryOptions; -import com.google.cloud.bigquery.Dataset; -import com.google.cloud.bigquery.Field; -import com.google.cloud.bigquery.InsertAllRequest; -import com.google.cloud.bigquery.InsertAllResponse; -import com.google.cloud.bigquery.Schema; -import com.google.cloud.bigquery.StandardTableDefinition; -import com.google.cloud.bigquery.Table; -import com.google.cloud.bigquery.TableDefinition; -import com.google.cloud.bigquery.TableId; -import com.google.cloud.bigquery.TableInfo; -import com.google.cloud.bigquery.TimePartitioning; -import io.odpf.firehose.config.BigQuerySinkConfig; -import io.odpf.firehose.metrics.BigQueryMetrics; -import io.odpf.firehose.metrics.Instrumentation; -import io.odpf.firehose.sink.bigquery.exception.BQDatasetLocationChangedException; -import lombok.Getter; - -import java.io.FileInputStream; -import java.io.IOException; -import java.time.Instant; -import java.util.List; -import java.util.Random; - -public class BigQueryClient { - private final BigQuery bigquery; - @Getter - private final TableId tableID; - private final BigQuerySinkConfig bqConfig; - private final BQTableDefinition bqTableDefinition; - private final Instrumentation instrumentation; - private static final int TABLE_INFO_UPDATE_RETRIES = 10; - private static final int DEFAULT_SLEEP_RETRY = 10000; - private final Random random = new Random(System.currentTimeMillis()); - - public BigQueryClient(BigQuerySinkConfig bqConfig, Instrumentation instrumentation) throws IOException { - this(getBigQueryInstance(bqConfig), bqConfig, instrumentation); - } - - public BigQueryClient(BigQuery bq, BigQuerySinkConfig bqConfig, Instrumentation instrumentation) { - this.bigquery = bq; - this.bqConfig = bqConfig; - this.tableID = TableId.of(bqConfig.getDatasetName(), bqConfig.getTableName()); - this.bqTableDefinition = new 
BQTableDefinition(bqConfig); - this.instrumentation = instrumentation; - } - - private static BigQuery getBigQueryInstance(BigQuerySinkConfig sinkConfig) throws IOException { - TransportOptions transportOptions = BigQueryOptions.getDefaultHttpTransportOptions().toBuilder() - .setConnectTimeout(sinkConfig.getBqClientConnectTimeoutMS()) - .setReadTimeout(sinkConfig.getBqClientReadTimeoutMS()) - .build(); - return BigQueryOptions.newBuilder() - .setTransportOptions(transportOptions) - .setCredentials(GoogleCredentials.fromStream(new FileInputStream(sinkConfig.getBigQueryCredentialPath()))) - .setProjectId(sinkConfig.getGCloudProjectID()) - .build().getService(); - } - - public InsertAllResponse insertAll(InsertAllRequest rows) { - Instant start = Instant.now(); - InsertAllResponse response = bigquery.insertAll(rows); - instrument(start, BigQueryMetrics.BigQueryAPIType.TABLE_INSERT_ALL); - return response; - } - - public void upsertTable(List bqSchemaFields) throws BigQueryException { - Schema schema = Schema.of(bqSchemaFields); - TableDefinition tableDefinition = getTableDefinition(schema); - TableInfo tableInfo = TableInfo.newBuilder(tableID, tableDefinition) - .setLabels(bqConfig.getTableLabels()) - .build(); - upsertDatasetAndTableWithRetry(tableInfo); - } - - private void upsertDatasetAndTableWithRetry(TableInfo info) { - for (int ii = 0; ii < TABLE_INFO_UPDATE_RETRIES; ii++) { - try { - upsertDatasetAndTable(info); - return; - } catch (BigQueryException e) { - instrumentation.logWarn(e.getMessage()); - if (e.getMessage().contains("Exceeded rate limits")) { - try { - int sleepMillis = random.nextInt(DEFAULT_SLEEP_RETRY); - instrumentation.logInfo("Waiting for " + sleepMillis + " milliseconds"); - Thread.sleep(sleepMillis); - } catch (InterruptedException interruptedException) { - instrumentation.captureNonFatalError(interruptedException, "Sleep interrupted"); - } - } else { - throw e; - } - } - } - } - - private void upsertDatasetAndTable(TableInfo tableInfo) { - Dataset dataSet = bigquery.getDataset(tableID.getDataset()); - if (dataSet == null || !bigquery.getDataset(tableID.getDataset()).exists()) { - Instant start = Instant.now(); - bigquery.create( - Dataset.newBuilder(tableID.getDataset()) - .setLocation(bqConfig.getBigQueryDatasetLocation()) - .setLabels(bqConfig.getDatasetLabels()) - .build() - ); - instrumentation.logInfo("Successfully CREATED bigquery DATASET: {}", tableID.getDataset()); - instrument(start, BigQueryMetrics.BigQueryAPIType.DATASET_CREATE); - } else if (shouldUpdateDataset(dataSet)) { - Instant start = Instant.now(); - bigquery.update( - Dataset.newBuilder(tableID.getDataset()) - .setLabels(bqConfig.getDatasetLabels()) - .build() - ); - instrumentation.logInfo("Successfully UPDATED bigquery DATASET: {} with labels", tableID.getDataset()); - instrument(start, BigQueryMetrics.BigQueryAPIType.DATASET_UPDATE); - } - - Table table = bigquery.getTable(tableID); - if (table == null || !table.exists()) { - Instant start = Instant.now(); - bigquery.create(tableInfo); - instrumentation.logInfo("Successfully CREATED bigquery TABLE: {}", tableID.getTable()); - instrument(start, BigQueryMetrics.BigQueryAPIType.TABLE_CREATE); - } else { - Schema existingSchema = table.getDefinition().getSchema(); - Schema updatedSchema = tableInfo.getDefinition().getSchema(); - - if (shouldUpdateTable(tableInfo, table, existingSchema, updatedSchema)) { - Instant start = Instant.now(); - bigquery.update(tableInfo); - instrumentation.logInfo("Successfully UPDATED bigquery TABLE: {}", 
tableID.getTable()); - instrument(start, BigQueryMetrics.BigQueryAPIType.TABLE_UPDATE); - } else { - instrumentation.logInfo("Skipping bigquery table update, since proto schema hasn't changed"); - } - } - } - - private void instrument(Instant startTime, BigQueryMetrics.BigQueryAPIType type) { - instrumentation.incrementCounter( - BigQueryMetrics.SINK_BIGQUERY_OPERATION_TOTAL, - String.format(BigQueryMetrics.BIGQUERY_TABLE_TAG, tableID.getTable()), - String.format(BigQueryMetrics.BIGQUERY_DATASET_TAG, tableID.getDataset()), - String.format(BigQueryMetrics.BIGQUERY_API_TAG, type)); - instrumentation.captureDurationSince( - BigQueryMetrics.SINK_BIGQUERY_OPERATION_LATENCY_MILLISECONDS, - startTime, - String.format(BigQueryMetrics.BIGQUERY_TABLE_TAG, tableID.getTable()), - String.format(BigQueryMetrics.BIGQUERY_DATASET_TAG, tableID.getDataset()), - String.format(BigQueryMetrics.BIGQUERY_API_TAG, type)); - } - - private boolean shouldUpdateTable(TableInfo tableInfo, Table table, Schema existingSchema, Schema updatedSchema) { - return !table.getLabels().equals(tableInfo.getLabels()) - || !existingSchema.equals(updatedSchema) - || shouldChangePartitionExpiryForStandardTable(table); - } - - private boolean shouldUpdateDataset(Dataset dataSet) { - if (!dataSet.getLocation().equals(bqConfig.getBigQueryDatasetLocation())) { - throw new BQDatasetLocationChangedException("Dataset location cannot be changed from " - + dataSet.getLocation() + " to " + bqConfig.getBigQueryDatasetLocation()); - } - - return !dataSet.getLabels().equals(bqConfig.getDatasetLabels()); - } - - private boolean shouldChangePartitionExpiryForStandardTable(Table table) { - if (!table.getDefinition().getType().equals(TableDefinition.Type.TABLE)) { - return false; - } - TimePartitioning timePartitioning = ((StandardTableDefinition) (table.getDefinition())).getTimePartitioning(); - if (timePartitioning == null) { - // If the table is not partitioned already, no need to update the table - return false; - } - long neverExpireMs = 0L; - Long currentExpirationMs = timePartitioning.getExpirationMs() == null ? neverExpireMs : timePartitioning.getExpirationMs(); - Long newExpirationMs = bqConfig.getBigQueryTablePartitionExpiryMS() > 0 ? 
bqConfig.getBigQueryTablePartitionExpiryMS() : neverExpireMs; - return !currentExpirationMs.equals(newExpirationMs); - } - - private TableDefinition getTableDefinition(Schema schema) { - return bqTableDefinition.getTableDefinition(schema); - } -} diff --git a/src/main/java/io/odpf/firehose/sink/bigquery/handler/BigQueryResponseParser.java b/src/main/java/io/odpf/firehose/sink/bigquery/handler/BigQueryResponseParser.java deleted file mode 100644 index 096a44948..000000000 --- a/src/main/java/io/odpf/firehose/sink/bigquery/handler/BigQueryResponseParser.java +++ /dev/null @@ -1,71 +0,0 @@ -package io.odpf.firehose.sink.bigquery.handler; - -import com.google.cloud.bigquery.BigQueryError; -import com.google.cloud.bigquery.InsertAllResponse; -import io.odpf.firehose.message.Message; -import io.odpf.firehose.error.ErrorInfo; -import io.odpf.firehose.error.ErrorType; -import io.odpf.firehose.metrics.BigQueryMetrics; -import io.odpf.firehose.metrics.Instrumentation; -import io.odpf.firehose.sink.bigquery.error.ErrorDescriptor; -import io.odpf.firehose.sink.bigquery.error.ErrorParser; -import io.odpf.firehose.sink.bigquery.error.InvalidSchemaError; -import io.odpf.firehose.sink.bigquery.error.OOBError; -import io.odpf.firehose.sink.bigquery.error.StoppedError; -import io.odpf.firehose.sink.bigquery.exception.BigQuerySinkException; -import io.odpf.firehose.sink.bigquery.models.Record; - -import java.util.ArrayList; -import java.util.Collections; -import java.util.List; -import java.util.Map; - -public class BigQueryResponseParser { - /** - * Parses the {@link InsertAllResponse} object and returns {@link Message} that were - * tried to sink in BQ and the error type {@link ErrorDescriptor}. - * {@link InsertAllResponse} in bqResponse are 1 to 1 indexed based on the records that are requested to be inserted. - * - * @param records - list of records that were tried with BQ insertion - * @param bqResponse - the status of insertion for all records as returned by BQ - * @param instrumentation - instrumentation object for metrics/logging - * @return list of messages with error. - */ - public static List parseResponse(final List records, final InsertAllResponse bqResponse, Instrumentation instrumentation) { - if (!bqResponse.hasErrors()) { - return Collections.emptyList(); - } - List messages = new ArrayList<>(); - Map> insertErrorsMap = bqResponse.getInsertErrors(); - for (final Map.Entry> errorEntry : insertErrorsMap.entrySet()) { - final Message message = records.get(errorEntry.getKey().intValue()).getMessage(); - List errors = ErrorParser.parseError(errorEntry.getValue()); - instrumentation.logError("Error while bigquery insert for message. 
Record: {}, Error: {}, Topic: {}, Partition: {}, Offset: {}", - records.get(errorEntry.getKey().intValue()).getColumns(), - errors, - message.getTopic(), - message.getPartition(), - message.getOffset()); - if (errorMatch(errors, io.odpf.firehose.sink.bigquery.error.UnknownError.class)) { - message.setErrorInfo(new ErrorInfo(new BigQuerySinkException(), ErrorType.SINK_UNKNOWN_ERROR)); - instrumentation.incrementCounter(BigQueryMetrics.SINK_BIGQUERY_ERRORS_TOTAL, String.format(BigQueryMetrics.BIGQUERY_ERROR_TAG, BigQueryMetrics.BigQueryErrorType.UNKNOWN_ERROR)); - } else if (errorMatch(errors, InvalidSchemaError.class)) { - message.setErrorInfo(new ErrorInfo(new BigQuerySinkException(), ErrorType.SINK_4XX_ERROR)); - instrumentation.incrementCounter(BigQueryMetrics.SINK_BIGQUERY_ERRORS_TOTAL, String.format(BigQueryMetrics.BIGQUERY_ERROR_TAG, BigQueryMetrics.BigQueryErrorType.INVALID_SCHEMA_ERROR)); - } else if (errorMatch(errors, OOBError.class)) { - message.setErrorInfo(new ErrorInfo(new BigQuerySinkException(), ErrorType.SINK_4XX_ERROR)); - instrumentation.incrementCounter(BigQueryMetrics.SINK_BIGQUERY_ERRORS_TOTAL, String.format(BigQueryMetrics.BIGQUERY_ERROR_TAG, BigQueryMetrics.BigQueryErrorType.OOB_ERROR)); - } else if (errorMatch(errors, StoppedError.class)) { - message.setErrorInfo(new ErrorInfo(new BigQuerySinkException(), ErrorType.SINK_5XX_ERROR)); - instrumentation.incrementCounter(BigQueryMetrics.SINK_BIGQUERY_ERRORS_TOTAL, String.format(BigQueryMetrics.BIGQUERY_ERROR_TAG, BigQueryMetrics.BigQueryErrorType.STOPPED_ERROR)); - } - messages.add(message); - } - - return messages; - } - - private static boolean errorMatch(List errors, Class c) { - return errors.stream().anyMatch(errorDescriptor -> errorDescriptor.getClass().equals(c)); - } -} diff --git a/src/main/java/io/odpf/firehose/sink/bigquery/handler/BigQueryRow.java b/src/main/java/io/odpf/firehose/sink/bigquery/handler/BigQueryRow.java deleted file mode 100644 index 243b9fb4d..000000000 --- a/src/main/java/io/odpf/firehose/sink/bigquery/handler/BigQueryRow.java +++ /dev/null @@ -1,14 +0,0 @@ -package io.odpf.firehose.sink.bigquery.handler; - - -import com.google.cloud.bigquery.InsertAllRequest; -import io.odpf.firehose.sink.bigquery.models.Record; - -/** - * Fetches BQ insertable row from the base record {@link Record}. The implementations can differ if unique rows need to be inserted or not. 
- */ -public interface BigQueryRow { - - InsertAllRequest.RowToInsert of(Record record); -} - diff --git a/src/main/java/io/odpf/firehose/sink/bigquery/handler/BigQueryRowWithInsertId.java b/src/main/java/io/odpf/firehose/sink/bigquery/handler/BigQueryRowWithInsertId.java deleted file mode 100644 index 2e1462828..000000000 --- a/src/main/java/io/odpf/firehose/sink/bigquery/handler/BigQueryRowWithInsertId.java +++ /dev/null @@ -1,11 +0,0 @@ -package io.odpf.firehose.sink.bigquery.handler; - -import com.google.cloud.bigquery.InsertAllRequest; -import io.odpf.firehose.sink.bigquery.models.Record; - -public class BigQueryRowWithInsertId implements BigQueryRow { - @Override - public InsertAllRequest.RowToInsert of(Record record) { - return InsertAllRequest.RowToInsert.of(record.getId(), record.getColumns()); - } -} diff --git a/src/main/java/io/odpf/firehose/sink/bigquery/handler/BigQueryRowWithoutInsertId.java b/src/main/java/io/odpf/firehose/sink/bigquery/handler/BigQueryRowWithoutInsertId.java deleted file mode 100644 index e956c94de..000000000 --- a/src/main/java/io/odpf/firehose/sink/bigquery/handler/BigQueryRowWithoutInsertId.java +++ /dev/null @@ -1,12 +0,0 @@ -package io.odpf.firehose.sink.bigquery.handler; - -import com.google.cloud.bigquery.InsertAllRequest; -import io.odpf.firehose.sink.bigquery.models.Record; - -public class BigQueryRowWithoutInsertId implements BigQueryRow { - - @Override - public InsertAllRequest.RowToInsert of(Record record) { - return InsertAllRequest.RowToInsert.of(record.getColumns()); - } -} diff --git a/src/main/java/io/odpf/firehose/sink/bigquery/models/BQField.java b/src/main/java/io/odpf/firehose/sink/bigquery/models/BQField.java deleted file mode 100644 index 4d89b5bbf..000000000 --- a/src/main/java/io/odpf/firehose/sink/bigquery/models/BQField.java +++ /dev/null @@ -1,102 +0,0 @@ -package io.odpf.firehose.sink.bigquery.models; - -import com.google.cloud.bigquery.Field; -import com.google.cloud.bigquery.FieldList; -import com.google.cloud.bigquery.LegacySQLTypeName; -import com.google.protobuf.DescriptorProtos; -import io.odpf.firehose.sink.bigquery.exception.BQSchemaMappingException; -import lombok.EqualsAndHashCode; - -import java.util.ArrayList; -import java.util.HashMap; -import java.util.List; -import java.util.Map; - -@EqualsAndHashCode -public class BQField { - private static final Map FIELD_LABEL_TO_BQ_MODE_MAP = new HashMap() {{ - put(DescriptorProtos.FieldDescriptorProto.Label.LABEL_OPTIONAL, Field.Mode.NULLABLE); - put(DescriptorProtos.FieldDescriptorProto.Label.LABEL_REPEATED, Field.Mode.REPEATED); - put(DescriptorProtos.FieldDescriptorProto.Label.LABEL_REQUIRED, Field.Mode.REQUIRED); - }}; - private static final Map FIELD_TYPE_TO_BQ_TYPE_MAP = new HashMap() {{ - put(DescriptorProtos.FieldDescriptorProto.Type.TYPE_BYTES, LegacySQLTypeName.BYTES); - put(DescriptorProtos.FieldDescriptorProto.Type.TYPE_STRING, LegacySQLTypeName.STRING); - put(DescriptorProtos.FieldDescriptorProto.Type.TYPE_ENUM, LegacySQLTypeName.STRING); - put(DescriptorProtos.FieldDescriptorProto.Type.TYPE_DOUBLE, LegacySQLTypeName.FLOAT); - put(DescriptorProtos.FieldDescriptorProto.Type.TYPE_FLOAT, LegacySQLTypeName.FLOAT); - put(DescriptorProtos.FieldDescriptorProto.Type.TYPE_BOOL, LegacySQLTypeName.BOOLEAN); - put(DescriptorProtos.FieldDescriptorProto.Type.TYPE_INT64, LegacySQLTypeName.INTEGER); - put(DescriptorProtos.FieldDescriptorProto.Type.TYPE_UINT64, LegacySQLTypeName.INTEGER); - put(DescriptorProtos.FieldDescriptorProto.Type.TYPE_INT32, LegacySQLTypeName.INTEGER); - 
put(DescriptorProtos.FieldDescriptorProto.Type.TYPE_UINT32, LegacySQLTypeName.INTEGER); - put(DescriptorProtos.FieldDescriptorProto.Type.TYPE_FIXED64, LegacySQLTypeName.INTEGER); - put(DescriptorProtos.FieldDescriptorProto.Type.TYPE_FIXED32, LegacySQLTypeName.INTEGER); - put(DescriptorProtos.FieldDescriptorProto.Type.TYPE_SFIXED32, LegacySQLTypeName.INTEGER); - put(DescriptorProtos.FieldDescriptorProto.Type.TYPE_SFIXED64, LegacySQLTypeName.INTEGER); - put(DescriptorProtos.FieldDescriptorProto.Type.TYPE_SINT32, LegacySQLTypeName.INTEGER); - put(DescriptorProtos.FieldDescriptorProto.Type.TYPE_SINT64, LegacySQLTypeName.INTEGER); - put(DescriptorProtos.FieldDescriptorProto.Type.TYPE_MESSAGE, LegacySQLTypeName.RECORD); - put(DescriptorProtos.FieldDescriptorProto.Type.TYPE_GROUP, LegacySQLTypeName.RECORD); - }}; - private static final Map FIELD_NAME_TO_BQ_TYPE_MAP = new HashMap() {{ - put(Constants.ProtobufTypeName.TIMESTAMP_PROTOBUF_TYPE_NAME, LegacySQLTypeName.TIMESTAMP); - put(Constants.ProtobufTypeName.STRUCT_PROTOBUF_TYPE_NAME, LegacySQLTypeName.STRING); - put(Constants.ProtobufTypeName.DURATION_PROTOBUF_TYPE_NAME, LegacySQLTypeName.RECORD); - }}; - private final String name; - private final Field.Mode mode; - private final LegacySQLTypeName type; - private List subFields; - - public BQField(String name, Field.Mode mode, LegacySQLTypeName type, List subFields) { - this.name = name; - this.mode = mode; - this.type = type; - this.subFields = subFields; - } - - public BQField(ProtoField protoField) { - this.name = protoField.getName(); - this.mode = FIELD_LABEL_TO_BQ_MODE_MAP.get(protoField.getLabel()); - this.type = getType(protoField); - this.subFields = new ArrayList<>(); - } - - /** - * Map fully qualified type name or protobuf type to bigquery types. - * Fully qualified name will be used as mapping key before protobuf type being used - * @param protoField - * @return - */ - private LegacySQLTypeName getType(ProtoField protoField) { - LegacySQLTypeName typeFromFieldName = FIELD_NAME_TO_BQ_TYPE_MAP.get(protoField.getTypeName()) != null - ? 
FIELD_NAME_TO_BQ_TYPE_MAP.get(protoField.getTypeName()) - : FIELD_TYPE_TO_BQ_TYPE_MAP.get(protoField.getType()); - if (typeFromFieldName == null) { - throw new BQSchemaMappingException(String.format("No type mapping found for field: %s, fieldType: %s, typeName: %s", protoField.getName(), protoField.getType(), protoField.getTypeName())); - } - return typeFromFieldName; - } - - public void setSubFields(List fields) { - this.subFields = fields; - } - - public Field getField() { - if (this.subFields == null || this.subFields.size() == 0) { - return Field.newBuilder(this.name, this.type).setMode(this.mode).build(); - } - return Field.newBuilder(this.name, this.type, FieldList.of(subFields)).setMode(this.mode).build(); - } - - public String getName() { - return name; - } - - public LegacySQLTypeName getType() { - return type; - } - - -} diff --git a/src/main/java/io/odpf/firehose/sink/bigquery/models/Constants.java b/src/main/java/io/odpf/firehose/sink/bigquery/models/Constants.java deleted file mode 100644 index e29d18ffd..000000000 --- a/src/main/java/io/odpf/firehose/sink/bigquery/models/Constants.java +++ /dev/null @@ -1,20 +0,0 @@ -package io.odpf.firehose.sink.bigquery.models; - - -public class Constants { - public static final String PARTITION_COLUMN_NAME = "message_partition"; - public static final String OFFSET_COLUMN_NAME = "message_offset"; - public static final String TOPIC_COLUMN_NAME = "message_topic"; - public static final String TIMESTAMP_COLUMN_NAME = "message_timestamp"; - public static final String LOAD_TIME_COLUMN_NAME = "load_time"; - - public static class Config { - public static final String RECORD_NAME = "record_name"; - } - - public static class ProtobufTypeName { - public static final String TIMESTAMP_PROTOBUF_TYPE_NAME = ".google.protobuf.Timestamp"; - public static final String STRUCT_PROTOBUF_TYPE_NAME = ".google.protobuf.Struct"; - public static final String DURATION_PROTOBUF_TYPE_NAME = ".google.protobuf.Duration"; - } -} diff --git a/src/main/java/io/odpf/firehose/sink/bigquery/models/MetadataUtil.java b/src/main/java/io/odpf/firehose/sink/bigquery/models/MetadataUtil.java deleted file mode 100644 index 10301cc37..000000000 --- a/src/main/java/io/odpf/firehose/sink/bigquery/models/MetadataUtil.java +++ /dev/null @@ -1,27 +0,0 @@ -package io.odpf.firehose.sink.bigquery.models; - -import com.google.cloud.bigquery.Field; -import com.google.cloud.bigquery.FieldList; -import com.google.cloud.bigquery.LegacySQLTypeName; - -import java.util.ArrayList; -import java.util.List; - -public class MetadataUtil { - public static List getMetadataFields() { - return new ArrayList() {{ - add(Field.newBuilder(Constants.OFFSET_COLUMN_NAME, LegacySQLTypeName.INTEGER).setMode(Field.Mode.NULLABLE).build()); - add(Field.newBuilder(Constants.TOPIC_COLUMN_NAME, LegacySQLTypeName.STRING).setMode(Field.Mode.NULLABLE).build()); - add(Field.newBuilder(Constants.LOAD_TIME_COLUMN_NAME, LegacySQLTypeName.TIMESTAMP).setMode(Field.Mode.NULLABLE).build()); - add(Field.newBuilder(Constants.TIMESTAMP_COLUMN_NAME, LegacySQLTypeName.TIMESTAMP).setMode(Field.Mode.NULLABLE).build()); - add(Field.newBuilder(Constants.PARTITION_COLUMN_NAME, LegacySQLTypeName.INTEGER).setMode(Field.Mode.NULLABLE).build()); - }}; - } - - public static Field getNamespacedMetadataField(String namespace) { - return Field - .newBuilder(namespace, LegacySQLTypeName.RECORD, FieldList.of(getMetadataFields())) - .setMode(Field.Mode.NULLABLE) - .build(); - } -} diff --git 
a/src/main/java/io/odpf/firehose/sink/bigquery/models/ProtoField.java b/src/main/java/io/odpf/firehose/sink/bigquery/models/ProtoField.java deleted file mode 100644 index 239cf790d..000000000 --- a/src/main/java/io/odpf/firehose/sink/bigquery/models/ProtoField.java +++ /dev/null @@ -1,85 +0,0 @@ -package io.odpf.firehose.sink.bigquery.models; - -import com.google.protobuf.DescriptorProtos; - -import java.util.ArrayList; -import java.util.Arrays; -import java.util.List; - -public class ProtoField { - private String name; - private String typeName; - private DescriptorProtos.FieldDescriptorProto.Type type; - private DescriptorProtos.FieldDescriptorProto.Label label; - private List fields; - private int index; - - public ProtoField() { - this.fields = new ArrayList<>(); - } - - public ProtoField(String name, String typeName, DescriptorProtos.FieldDescriptorProto.Type type, DescriptorProtos.FieldDescriptorProto.Label label, List fields, int index) { - this.name = name; - this.typeName = typeName; - this.type = type; - this.label = label; - this.fields = fields; - this.index = index; - } - - public ProtoField(DescriptorProtos.FieldDescriptorProto f) { - this.name = f.getName(); - this.type = f.getType(); - this.label = f.getLabel(); - this.index = f.getNumber(); - this.fields = new ArrayList<>(); - this.typeName = f.getTypeName(); - } - - public boolean isNested() { - if (this.typeName != null && !this.typeName.equals("")) { - return !typeName.equals(Constants.ProtobufTypeName.TIMESTAMP_PROTOBUF_TYPE_NAME) - && !typeName.equals(Constants.ProtobufTypeName.STRUCT_PROTOBUF_TYPE_NAME) - && type == DescriptorProtos.FieldDescriptorProto.Type.TYPE_MESSAGE; - } - return type == DescriptorProtos.FieldDescriptorProto.Type.TYPE_MESSAGE; - } - - public void addField(ProtoField field) { - this.fields.add(field); - } - - @Override - public String toString() { - return "{" - + "name='" + name + '\'' - + ", type=" + type - + ", len=" + fields.size() - + ", nested=" + Arrays.toString(fields.toArray()) - + '}'; - } - - public List getFields() { - return fields; - } - - public int getIndex() { - return index; - } - - public String getName() { - return name; - } - - public DescriptorProtos.FieldDescriptorProto.Label getLabel() { - return label; - } - - public DescriptorProtos.FieldDescriptorProto.Type getType() { - return type; - } - - public String getTypeName() { - return typeName; - } -} diff --git a/src/main/java/io/odpf/firehose/sink/bigquery/models/Record.java b/src/main/java/io/odpf/firehose/sink/bigquery/models/Record.java deleted file mode 100644 index 15402bcf9..000000000 --- a/src/main/java/io/odpf/firehose/sink/bigquery/models/Record.java +++ /dev/null @@ -1,22 +0,0 @@ -package io.odpf.firehose.sink.bigquery.models; - -import io.odpf.firehose.message.Message; -import lombok.AllArgsConstructor; -import lombok.Getter; - -import java.util.Map; - -@AllArgsConstructor -@Getter -public class Record { - private final Message message; - private final Map columns; - - public String getId() { - return String.format("%s_%d_%d", message.getTopic(), message.getPartition(), message.getOffset()); - } - - public long getSize() { - return (message.getLogKey() == null ? 0 : message.getLogKey().length) + (message.getLogMessage() == null ? 
0 : message.getLogMessage().length); - } -} diff --git a/src/main/java/io/odpf/firehose/sink/bigquery/models/Records.java b/src/main/java/io/odpf/firehose/sink/bigquery/models/Records.java deleted file mode 100644 index 57ccd16e5..000000000 --- a/src/main/java/io/odpf/firehose/sink/bigquery/models/Records.java +++ /dev/null @@ -1,13 +0,0 @@ -package io.odpf.firehose.sink.bigquery.models; - -import lombok.AllArgsConstructor; -import lombok.Getter; - -import java.util.List; - -@AllArgsConstructor -@Getter -public class Records { - private final List validRecords; - private final List invalidRecords; -} diff --git a/src/main/java/io/odpf/firehose/sink/bigquery/proto/DescriptorCache.java b/src/main/java/io/odpf/firehose/sink/bigquery/proto/DescriptorCache.java deleted file mode 100644 index 7fe778428..000000000 --- a/src/main/java/io/odpf/firehose/sink/bigquery/proto/DescriptorCache.java +++ /dev/null @@ -1,18 +0,0 @@ -package io.odpf.firehose.sink.bigquery.proto; - -import com.google.protobuf.Descriptors; - -import java.util.Map; - -public class DescriptorCache { - public Descriptors.Descriptor fetch(Map allDescriptors, Map typeNameToPackageNameMap, String protoName) { - if (allDescriptors.get(protoName) != null) { - return allDescriptors.get(protoName); - } - String packageName = typeNameToPackageNameMap.get(protoName); - if (packageName == null) { - return null; - } - return allDescriptors.get(packageName); - } -} diff --git a/src/main/java/io/odpf/firehose/sink/bigquery/proto/ProtoFieldParser.java b/src/main/java/io/odpf/firehose/sink/bigquery/proto/ProtoFieldParser.java deleted file mode 100644 index 612f66bd9..000000000 --- a/src/main/java/io/odpf/firehose/sink/bigquery/proto/ProtoFieldParser.java +++ /dev/null @@ -1,44 +0,0 @@ -package io.odpf.firehose.sink.bigquery.proto; - -import com.google.protobuf.Descriptors; -import io.odpf.firehose.sink.bigquery.exception.ProtoNotFoundException; -import io.odpf.firehose.sink.bigquery.models.ProtoField; - -import java.util.Map; - -public class ProtoFieldParser { - /** - * Bigquery supports a maximum of 15 level nested structures. 
- * Thus for nested data type of more than 15 level or for recursive data type, - * we limit the fields to only contain schema upto 15 levels deep - */ - private static final int MAX_BIGQUERY_NESTED_SCHEMA_LEVEL = 15; - private final DescriptorCache descriptorCache = new DescriptorCache(); - - public ProtoField parseFields(ProtoField protoField, String protoSchema, Map allDescriptors, - Map typeNameToPackageNameMap) { - return parseFields(protoField, protoSchema, allDescriptors, typeNameToPackageNameMap, 1); - } - - private ProtoField parseFields(ProtoField protoField, String protoSchema, Map allDescriptors, - Map typeNameToPackageNameMap, int level) { - - Descriptors.Descriptor currentProto = descriptorCache.fetch(allDescriptors, typeNameToPackageNameMap, protoSchema); - if (currentProto == null) { - throw new ProtoNotFoundException("No Proto found for class " + protoSchema); - } - for (Descriptors.FieldDescriptor field : currentProto.getFields()) { - ProtoField fieldModel = new ProtoField(field.toProto()); - if (fieldModel.isNested()) { - if (protoSchema.substring(1).equals(currentProto.getFullName())) { - if (level >= MAX_BIGQUERY_NESTED_SCHEMA_LEVEL) { - continue; - } - } - fieldModel = parseFields(fieldModel, field.toProto().getTypeName(), allDescriptors, typeNameToPackageNameMap, level + 1); - } - protoField.addField(fieldModel); - } - return protoField; - } -} diff --git a/src/main/java/io/odpf/firehose/sink/bigquery/proto/ProtoMapper.java b/src/main/java/io/odpf/firehose/sink/bigquery/proto/ProtoMapper.java deleted file mode 100644 index 1a79bafc2..000000000 --- a/src/main/java/io/odpf/firehose/sink/bigquery/proto/ProtoMapper.java +++ /dev/null @@ -1,60 +0,0 @@ -package io.odpf.firehose.sink.bigquery.proto; - -import com.fasterxml.jackson.databind.ObjectMapper; -import com.fasterxml.jackson.databind.node.JsonNodeFactory; -import com.fasterxml.jackson.databind.node.ObjectNode; -import com.google.cloud.bigquery.Field; -import io.odpf.firehose.sink.bigquery.models.BQField; -import io.odpf.firehose.sink.bigquery.models.Constants; -import io.odpf.firehose.sink.bigquery.models.ProtoField; - -import java.io.IOException; -import java.util.ArrayList; -import java.util.List; - -public class ProtoMapper { - private ObjectMapper objectMapper; - - public ProtoMapper() { - objectMapper = new ObjectMapper(); - } - - public String generateColumnMappings(List fields) throws IOException { - ObjectNode objectNode = generateColumnMappingsJson(fields); - return objectMapper.writeValueAsString(objectNode); - } - - private ObjectNode generateColumnMappingsJson(List fields) { - if (fields.size() == 0) { - return JsonNodeFactory.instance.objectNode(); - } - - ObjectNode objNode = JsonNodeFactory.instance.objectNode(); - for (ProtoField field : fields) { - if (field.isNested()) { - ObjectNode innerJSONValue = generateColumnMappingsJson(field.getFields()); - innerJSONValue.put(Constants.Config.RECORD_NAME, field.getName()); - objNode.put(String.valueOf(field.getIndex()), innerJSONValue); - } else { - objNode.put(String.valueOf(field.getIndex()), field.getName()); - } - } - return objNode; - } - - public List generateBigquerySchema(ProtoField protoField) { - if (protoField == null) { - return null; - } - List schemaFields = new ArrayList<>(); - for (ProtoField field : protoField.getFields()) { - BQField bqField = new BQField(field); - if (field.isNested()) { - List fields = generateBigquerySchema(field); - bqField.setSubFields(fields); - } - schemaFields.add(bqField.getField()); - } - return schemaFields; - } 
-} diff --git a/src/main/java/io/odpf/firehose/sink/bigquery/proto/ProtoUpdateListener.java b/src/main/java/io/odpf/firehose/sink/bigquery/proto/ProtoUpdateListener.java deleted file mode 100644 index d55801c0c..000000000 --- a/src/main/java/io/odpf/firehose/sink/bigquery/proto/ProtoUpdateListener.java +++ /dev/null @@ -1,140 +0,0 @@ -package io.odpf.firehose.sink.bigquery.proto; - -import com.google.cloud.bigquery.BigQueryException; -import com.google.cloud.bigquery.Field; -import com.google.gson.Gson; -import com.google.gson.reflect.TypeToken; -import com.google.protobuf.Descriptors.Descriptor; - -import io.odpf.firehose.config.BigQuerySinkConfig; -import io.odpf.firehose.sink.bigquery.converter.MessageRecordConverter; -import io.odpf.firehose.sink.bigquery.converter.MessageRecordConverterCache; -import io.odpf.firehose.sink.bigquery.converter.RowMapper; -import io.odpf.firehose.sink.bigquery.exception.BQSchemaMappingException; -import io.odpf.firehose.sink.bigquery.exception.BQTableUpdateFailure; -import io.odpf.firehose.sink.bigquery.handler.BigQueryClient; -import io.odpf.firehose.sink.bigquery.models.MetadataUtil; -import io.odpf.firehose.sink.bigquery.models.ProtoField; -import io.odpf.stencil.Parser; -import lombok.Getter; -import lombok.Setter; -import lombok.extern.slf4j.Slf4j; - -import java.io.IOException; -import java.lang.reflect.Type; -import java.util.ArrayList; -import java.util.HashSet; -import java.util.List; -import java.util.Map; -import java.util.Set; -import java.util.Properties; -import java.util.function.Function; -import java.util.function.Predicate; -import java.util.stream.Collectors; - -@Slf4j -public class ProtoUpdateListener implements io.odpf.stencil.SchemaUpdateListener { - private final BigQuerySinkConfig config; - private final ProtoMapper protoMapper = new ProtoMapper(); - private final ProtoFieldParser protoMappingParser = new ProtoFieldParser(); - private final BigQueryClient bqClient; - @Getter - private final MessageRecordConverterCache messageRecordConverterCache; - @Setter - private Parser stencilParser; - private static final Gson GSON = new Gson(); - - public ProtoUpdateListener(BigQuerySinkConfig config, BigQueryClient bqClient, MessageRecordConverterCache messageRecordConverterCache) { - this.config = config; - this.bqClient = bqClient; - this.messageRecordConverterCache = messageRecordConverterCache; - } - - @Override - public void onSchemaUpdate(Map newDescriptors) { - log.info("stencil cache was refreshed, validating if bigquery schema changed"); - try { - ProtoField protoField = new ProtoField(); - protoField = protoMappingParser.parseFields(protoField, config.getInputSchemaProtoClass(), newDescriptors, - getTypeNameToPackageNameMap(newDescriptors)); - updateProtoParser(protoField); - } catch (BigQueryException | IOException e) { - String errMsg = "Error while updating bigquery table on callback:" + e.getMessage(); - log.error(errMsg); - throw new BQTableUpdateFailure(errMsg, e); - } - } - - private Map getTypeNameToPackageNameMap(Map descriptors) { - return descriptors.entrySet().stream() - .filter(distinctByFullName(t -> t.getValue().getFullName())) - .collect(Collectors.toMap( - (mapEntry) -> String.format(".%s", mapEntry.getValue().getFullName()), - (mapEntry) -> mapEntry.getKey())); - } - - private Predicate distinctByFullName(Function keyExtractor) { - Set objects = new HashSet<>(); - return t -> objects.add(keyExtractor.apply(t)); - } - - // First get latest protomapping, update bq schema, and if all goes fine - // then only update 
beast's proto mapping config - private void updateProtoParser(final ProtoField protoField) throws IOException { - String protoMappingString = protoMapper.generateColumnMappings(protoField.getFields()); - List bqSchemaFields = protoMapper.generateBigquerySchema(protoField); - addMetadataFields(bqSchemaFields); - bqClient.upsertTable(bqSchemaFields); - setProtoParser(protoMappingString); - } - - private Properties mapToProperties(Map inputMap) { - Properties properties = new Properties(); - for (Map.Entry kv : inputMap.entrySet()) { - if (kv.getValue() instanceof String) { - properties.put(kv.getKey(), kv.getValue()); - } else if (kv.getValue() instanceof Map) { - properties.put(kv.getKey(), mapToProperties((Map) kv.getValue())); - } - } - return properties; - } - - private void addMetadataFields(List bqSchemaFields) { - List bqMetadataFields = new ArrayList<>(); - String namespaceName = config.getBqMetadataNamespace(); - if (namespaceName.isEmpty()) { - bqMetadataFields.addAll(MetadataUtil.getMetadataFields()); - } else { - bqMetadataFields.add(MetadataUtil.getNamespacedMetadataField(namespaceName)); - } - - List duplicateFields = getDuplicateFields(bqSchemaFields, bqMetadataFields).stream().map(Field::getName).collect(Collectors.toList()); - if (duplicateFields.size() > 0) { - throw new BQSchemaMappingException(String.format("Metadata field(s) is already present in the schema. " - + "fields: %s", duplicateFields)); - } - bqSchemaFields.addAll(bqMetadataFields); - } - - private void setProtoParser(String protoMapping) { - Type type = new TypeToken>() { - }.getType(); - Map m = GSON.fromJson(protoMapping, type); - Properties columnMapping = mapToProperties(m); - messageRecordConverterCache.setMessageRecordConverter( - new MessageRecordConverter(new RowMapper(columnMapping), - stencilParser, config)); - } - - public void close() throws IOException { - } - - private List getDuplicateFields(List fields1, List fields2) { - return fields1.stream().filter(field -> containsField(fields2, field.getName())).collect(Collectors.toList()); - } - - private boolean containsField(List fields, String fieldName) { - return fields.stream().anyMatch(field -> field.getName().equals(fieldName)); - } -} diff --git a/src/main/java/io/odpf/firehose/sink/bigquery/proto/UnknownProtoFields.java b/src/main/java/io/odpf/firehose/sink/bigquery/proto/UnknownProtoFields.java deleted file mode 100644 index 52ffd0070..000000000 --- a/src/main/java/io/odpf/firehose/sink/bigquery/proto/UnknownProtoFields.java +++ /dev/null @@ -1,21 +0,0 @@ -package io.odpf.firehose.sink.bigquery.proto; - -import com.google.protobuf.InvalidProtocolBufferException; -import com.google.protobuf.UnknownFieldSet; -import lombok.extern.slf4j.Slf4j; - -/** - * Try to convert raw proto bytes to some meaningful representation that is good enough for debug. 
- * */ -@Slf4j -public class UnknownProtoFields { - public static String toString(byte[] message) { - String convertedFields = ""; - try { - convertedFields = UnknownFieldSet.parseFrom(message).toString(); - } catch (InvalidProtocolBufferException e) { - log.warn("invalid byte representation of a protobuf message: {}", new String(message)); - } - return convertedFields; - } -} diff --git a/src/main/java/io/odpf/firehose/sink/blob/BlobSink.java b/src/main/java/io/odpf/firehose/sink/blob/BlobSink.java index ce44741bb..9f16e686d 100644 --- a/src/main/java/io/odpf/firehose/sink/blob/BlobSink.java +++ b/src/main/java/io/odpf/firehose/sink/blob/BlobSink.java @@ -1,14 +1,14 @@ package io.odpf.firehose.sink.blob; -import io.odpf.firehose.error.ErrorInfo; -import io.odpf.firehose.error.ErrorType; +import io.odpf.depot.error.ErrorInfo; +import io.odpf.depot.error.ErrorType; import io.odpf.firehose.message.Message; import io.odpf.firehose.consumer.kafka.OffsetManager; import io.odpf.firehose.exception.DeserializerException; import io.odpf.firehose.exception.SinkException; import io.odpf.firehose.exception.UnknownFieldsException; import io.odpf.firehose.exception.EmptyMessageException; -import io.odpf.firehose.metrics.Instrumentation; +import io.odpf.firehose.metrics.FirehoseInstrumentation; import io.odpf.firehose.sink.AbstractSink; import io.odpf.firehose.sink.blob.message.MessageDeSerializer; import io.odpf.firehose.sink.blob.message.Record; @@ -30,8 +30,8 @@ public class BlobSink extends AbstractSink { private List messages; - public BlobSink(Instrumentation instrumentation, String sinkType, OffsetManager offsetManager, WriterOrchestrator writerOrchestrator, MessageDeSerializer messageDeSerializer) { - super(instrumentation, sinkType); + public BlobSink(FirehoseInstrumentation firehoseInstrumentation, String sinkType, OffsetManager offsetManager, WriterOrchestrator writerOrchestrator, MessageDeSerializer messageDeSerializer) { + super(firehoseInstrumentation, sinkType); this.offsetManager = offsetManager; this.writerOrchestrator = writerOrchestrator; this.messageDeSerializer = messageDeSerializer; @@ -47,16 +47,16 @@ protected List execute() throws Exception { String filePath = writerOrchestrator.write(record); fileToMessages.computeIfAbsent(filePath, key -> new ArrayList<>()).add(message); } catch (EmptyMessageException e) { - getInstrumentation().logWarn("empty message found on topic: {}, partition: {}, offset: {}", + getFirehoseInstrumentation().logWarn("empty message found on topic: {}, partition: {}, offset: {}", message.getTopic(), message.getPartition(), message.getOffset()); message.setErrorInfo(new ErrorInfo(e, ErrorType.INVALID_MESSAGE_ERROR)); failedMessages.add(message); } catch (UnknownFieldsException e) { - getInstrumentation().logWarn(e.getMessage()); + getFirehoseInstrumentation().logWarn(e.getMessage()); message.setErrorInfo(new ErrorInfo(e, ErrorType.UNKNOWN_FIELDS_ERROR)); failedMessages.add(message); } catch (DeserializerException e) { - getInstrumentation().logWarn("message deserialization failed on topic: {}, partition: {}, offset: {}, reason: {}", + getFirehoseInstrumentation().logWarn("message deserialization failed on topic: {}, partition: {}, offset: {}, reason: {}", message.getTopic(), message.getPartition(), message.getOffset(), e.getMessage()); message.setErrorInfo(new ErrorInfo(e, ErrorType.DESERIALIZATION_ERROR)); failedMessages.add(message); diff --git a/src/main/java/io/odpf/firehose/sink/blob/BlobSinkFactory.java 
b/src/main/java/io/odpf/firehose/sink/blob/BlobSinkFactory.java index 9c7afe3a7..faf08c060 100644 --- a/src/main/java/io/odpf/firehose/sink/blob/BlobSinkFactory.java +++ b/src/main/java/io/odpf/firehose/sink/blob/BlobSinkFactory.java @@ -1,10 +1,10 @@ package io.odpf.firehose.sink.blob; import com.google.protobuf.Descriptors; +import io.odpf.depot.metrics.StatsDReporter; import io.odpf.firehose.config.BlobSinkConfig; import io.odpf.firehose.consumer.kafka.OffsetManager; -import io.odpf.firehose.metrics.Instrumentation; -import io.odpf.firehose.metrics.StatsDReporter; +import io.odpf.firehose.metrics.FirehoseInstrumentation; import io.odpf.firehose.sink.Sink; import io.odpf.firehose.sink.blob.message.MessageDeSerializer; import io.odpf.firehose.sink.blob.proto.KafkaMetadataProtoMessage; @@ -34,7 +34,7 @@ public static Sink create(Map configuration, OffsetManager offse WriterOrchestrator writerOrchestrator = new WriterOrchestrator(sinkConfig, localStorage, sinkBlobStorage, statsDReporter); MessageDeSerializer messageDeSerializer = new MessageDeSerializer(sinkConfig, stencilClient); return new BlobSink( - new Instrumentation(statsDReporter, BlobSink.class), + new FirehoseInstrumentation(statsDReporter, BlobSink.class), sinkConfig.getSinkType().toString(), offsetManager, writerOrchestrator, @@ -60,7 +60,7 @@ private static LocalStorage getLocalFileWriterWrapper(BlobSinkConfig sinkConfig, outputMessageDescriptor, metadataMessageDescriptor.getFields(), writerPolicies, - new Instrumentation(statsDReporter, LocalStorage.class)); + new FirehoseInstrumentation(statsDReporter, LocalStorage.class)); } public static BlobStorage createSinkObjectStorage(BlobSinkConfig sinkConfig, Map configuration) { diff --git a/src/main/java/io/odpf/firehose/sink/blob/writer/WriterOrchestrator.java b/src/main/java/io/odpf/firehose/sink/blob/writer/WriterOrchestrator.java index 87afa327b..71fac427e 100644 --- a/src/main/java/io/odpf/firehose/sink/blob/writer/WriterOrchestrator.java +++ b/src/main/java/io/odpf/firehose/sink/blob/writer/WriterOrchestrator.java @@ -1,8 +1,8 @@ package io.odpf.firehose.sink.blob.writer; +import io.odpf.depot.metrics.StatsDReporter; import io.odpf.firehose.config.BlobSinkConfig; -import io.odpf.firehose.metrics.Instrumentation; -import io.odpf.firehose.metrics.StatsDReporter; +import io.odpf.firehose.metrics.FirehoseInstrumentation; import io.odpf.firehose.sink.common.blobstorage.BlobStorage; import io.odpf.firehose.sink.blob.message.Record; import io.odpf.firehose.sink.blob.writer.local.LocalFileMetadata; @@ -58,7 +58,7 @@ public WriterOrchestrator(BlobSinkConfig sinkConfig, LocalStorage localStorage, new LocalFileChecker( toBeFlushedToRemotePaths, timePartitionWriterMap, - localStorage, new Instrumentation(statsDReporter, LocalFileChecker.class)), + localStorage, new FirehoseInstrumentation(statsDReporter, LocalFileChecker.class)), FILE_CHECKER_THREAD_INITIAL_DELAY_SECONDS, FILE_CHECKER_THREAD_FREQUENCY_SECONDS, TimeUnit.SECONDS); @@ -71,7 +71,7 @@ localStorage, new Instrumentation(statsDReporter, LocalFileChecker.class)), remoteUploadFutures, remoteUploadScheduler, blobStorage, - new Instrumentation(statsDReporter, BlobStorageChecker.class)), + new FirehoseInstrumentation(statsDReporter, BlobStorageChecker.class)), FILE_CHECKER_THREAD_INITIAL_DELAY_SECONDS, FILE_CHECKER_THREAD_FREQUENCY_SECONDS, TimeUnit.SECONDS); diff --git a/src/main/java/io/odpf/firehose/sink/blob/writer/local/LocalFileChecker.java b/src/main/java/io/odpf/firehose/sink/blob/writer/local/LocalFileChecker.java index 
fc69a9e77..135648505 100644 --- a/src/main/java/io/odpf/firehose/sink/blob/writer/local/LocalFileChecker.java +++ b/src/main/java/io/odpf/firehose/sink/blob/writer/local/LocalFileChecker.java @@ -1,6 +1,6 @@ package io.odpf.firehose.sink.blob.writer.local; -import io.odpf.firehose.metrics.Instrumentation; +import io.odpf.firehose.metrics.FirehoseInstrumentation; import java.io.IOException; import java.nio.file.Path; @@ -17,22 +17,22 @@ public class LocalFileChecker implements Runnable { private final Queue toBeFlushedToRemotePaths; private final Map timePartitionWriterMap; private final LocalStorage localStorage; - private final Instrumentation instrumentation; + private final FirehoseInstrumentation firehoseInstrumentation; public LocalFileChecker(Queue toBeFlushedToRemotePaths, Map timePartitionWriterMap, LocalStorage localStorage, - Instrumentation instrumentation) { + FirehoseInstrumentation firehoseInstrumentation) { this.toBeFlushedToRemotePaths = toBeFlushedToRemotePaths; this.timePartitionWriterMap = timePartitionWriterMap; this.localStorage = localStorage; - this.instrumentation = instrumentation; + this.firehoseInstrumentation = firehoseInstrumentation; } @Override public void run() { - instrumentation.captureValue(LOCAL_FILE_OPEN_TOTAL, timePartitionWriterMap.size()); + firehoseInstrumentation.captureValue(LOCAL_FILE_OPEN_TOTAL, timePartitionWriterMap.size()); Map toBeRotated = timePartitionWriterMap.entrySet().stream().filter(kv -> localStorage.shouldRotate(kv.getValue())) .collect(Collectors.toMap(Map.Entry::getKey, Map.Entry::getValue)); @@ -41,7 +41,7 @@ public void run() { try { Instant startTime = Instant.now(); LocalFileMetadata metadata = writer.closeAndFetchMetaData(); - instrumentation.logInfo("Closing Local File {} ", metadata.getFullPath()); + firehoseInstrumentation.logInfo("Closing Local File {} ", metadata.getFullPath()); toBeFlushedToRemotePaths.add(metadata); captureFileClosedSuccessMetric(startTime, metadata); } catch (IOException e) { @@ -50,17 +50,17 @@ public void run() { throw new LocalFileWriterFailedException(e); } }); - instrumentation.captureValue(LOCAL_FILE_OPEN_TOTAL, timePartitionWriterMap.size()); + firehoseInstrumentation.captureValue(LOCAL_FILE_OPEN_TOTAL, timePartitionWriterMap.size()); } private void captureFileClosedSuccessMetric(Instant startTime, LocalFileMetadata localFileMetadata) { - instrumentation.incrementCounter(LOCAL_FILE_CLOSE_TOTAL, SUCCESS_TAG); - instrumentation.captureDurationSince(LOCAL_FILE_CLOSING_TIME_MILLISECONDS, startTime); - instrumentation.captureCount(LOCAL_FILE_SIZE_BYTES, localFileMetadata.getSize()); - instrumentation.captureCount(LOCAL_FILE_RECORDS_TOTAL, localFileMetadata.getRecordCount()); + firehoseInstrumentation.incrementCounter(LOCAL_FILE_CLOSE_TOTAL, SUCCESS_TAG); + firehoseInstrumentation.captureDurationSince(LOCAL_FILE_CLOSING_TIME_MILLISECONDS, startTime); + firehoseInstrumentation.captureCount(LOCAL_FILE_SIZE_BYTES, localFileMetadata.getSize()); + firehoseInstrumentation.captureCount(LOCAL_FILE_RECORDS_TOTAL, localFileMetadata.getRecordCount()); } private void captureFileCloseFailedMetric() { - instrumentation.incrementCounter(LOCAL_FILE_CLOSE_TOTAL, FAILURE_TAG); + firehoseInstrumentation.incrementCounter(LOCAL_FILE_CLOSE_TOTAL, FAILURE_TAG); } } diff --git a/src/main/java/io/odpf/firehose/sink/blob/writer/local/LocalStorage.java b/src/main/java/io/odpf/firehose/sink/blob/writer/local/LocalStorage.java index 695d4eb6c..dbbd858fc 100644 --- 
a/src/main/java/io/odpf/firehose/sink/blob/writer/local/LocalStorage.java +++ b/src/main/java/io/odpf/firehose/sink/blob/writer/local/LocalStorage.java @@ -3,7 +3,7 @@ import com.google.protobuf.Descriptors; import io.odpf.firehose.config.BlobSinkConfig; import io.odpf.firehose.exception.ConfigurationException; -import io.odpf.firehose.metrics.Instrumentation; +import io.odpf.firehose.metrics.FirehoseInstrumentation; import io.odpf.firehose.sink.blob.writer.local.policy.WriterPolicy; import lombok.AllArgsConstructor; @@ -21,7 +21,7 @@ public class LocalStorage { private final Descriptors.Descriptor messageDescriptor; private final List metadataFieldDescriptor; private final List policies; - private final Instrumentation instrumentation; + private final FirehoseInstrumentation firehoseInstrumentation; public LocalFileWriter createLocalFileWriter(Path partitionPath) { Path basePath = Paths.get(sinkConfig.getLocalDirectory()); @@ -35,7 +35,7 @@ private LocalParquetFileWriter createWriter(Path basePath, Path fullPath) { switch (sinkConfig.getLocalFileWriterType()) { case PARQUET: try { - instrumentation.logInfo("Creating Local File " + fullPath); + firehoseInstrumentation.logInfo("Creating Local File " + fullPath); return new LocalParquetFileWriter( System.currentTimeMillis(), basePath.toString(), @@ -57,8 +57,8 @@ public void deleteLocalFile(String pathString) { try { Path filePath = Paths.get(pathString); Path crcFilePath = filePath.getParent().resolve("." + filePath.getFileName() + ".crc"); - instrumentation.logInfo("Deleting Local File {}", filePath); - instrumentation.logInfo("Deleting Local File {}", crcFilePath); + firehoseInstrumentation.logInfo("Deleting Local File {}", filePath); + firehoseInstrumentation.logInfo("Deleting Local File {}", crcFilePath); deleteLocalFile(filePath, crcFilePath); } catch (IOException e) { throw new LocalFileWriterFailedException(e); diff --git a/src/main/java/io/odpf/firehose/sink/blob/writer/remote/BlobStorageChecker.java b/src/main/java/io/odpf/firehose/sink/blob/writer/remote/BlobStorageChecker.java index 482780180..8350170a3 100644 --- a/src/main/java/io/odpf/firehose/sink/blob/writer/remote/BlobStorageChecker.java +++ b/src/main/java/io/odpf/firehose/sink/blob/writer/remote/BlobStorageChecker.java @@ -1,6 +1,6 @@ package io.odpf.firehose.sink.blob.writer.remote; -import io.odpf.firehose.metrics.Instrumentation; +import io.odpf.firehose.metrics.FirehoseInstrumentation; import io.odpf.firehose.sink.common.blobstorage.BlobStorage; import io.odpf.firehose.sink.blob.writer.local.LocalFileMetadata; import lombok.AllArgsConstructor; @@ -21,7 +21,7 @@ public class BlobStorageChecker implements Runnable { private final Set remoteUploadFutures; private final ExecutorService remoteUploadScheduler; private final BlobStorage blobStorage; - private final Instrumentation instrumentation; + private final FirehoseInstrumentation firehoseInstrumentation; @Override public void run() { @@ -36,7 +36,7 @@ public void run() { private BlobStorageWriterFutureHandler submitTask(LocalFileMetadata localFileMetadata) { BlobStorageWorker worker = new BlobStorageWorker(blobStorage, localFileMetadata); Future f = remoteUploadScheduler.submit(worker); - return new BlobStorageWriterFutureHandler(f, localFileMetadata, instrumentation); + return new BlobStorageWriterFutureHandler(f, localFileMetadata, firehoseInstrumentation); } } diff --git a/src/main/java/io/odpf/firehose/sink/blob/writer/remote/BlobStorageWriterFutureHandler.java 
b/src/main/java/io/odpf/firehose/sink/blob/writer/remote/BlobStorageWriterFutureHandler.java index 4cbadfcbd..7215acfff 100644 --- a/src/main/java/io/odpf/firehose/sink/blob/writer/remote/BlobStorageWriterFutureHandler.java +++ b/src/main/java/io/odpf/firehose/sink/blob/writer/remote/BlobStorageWriterFutureHandler.java @@ -1,6 +1,6 @@ package io.odpf.firehose.sink.blob.writer.remote; -import io.odpf.firehose.metrics.Instrumentation; +import io.odpf.firehose.metrics.FirehoseInstrumentation; import io.odpf.firehose.sink.common.blobstorage.BlobStorageException; import io.odpf.firehose.sink.blob.writer.local.LocalFileMetadata; import lombok.AllArgsConstructor; @@ -18,7 +18,7 @@ public class BlobStorageWriterFutureHandler { private Future future; private LocalFileMetadata localFileMetadata; - private Instrumentation instrumentation; + private FirehoseInstrumentation firehoseInstrumentation; private static final String EMPTY = ""; public String getFullPath() { @@ -43,21 +43,21 @@ public boolean isFinished() { } private void captureFileUploadSuccessMetric(long totalTime) { - instrumentation.logInfo("Flushed to blob storage {}", localFileMetadata.getFullPath()); - instrumentation.incrementCounter(FILE_UPLOAD_TOTAL, SUCCESS_TAG); - instrumentation.captureCount(FILE_UPLOAD_BYTES, localFileMetadata.getSize()); - instrumentation.captureCount(FILE_UPLOAD_RECORDS_TOTAL, localFileMetadata.getRecordCount()); - instrumentation.captureDuration(FILE_UPLOAD_TIME_MILLISECONDS, totalTime); + firehoseInstrumentation.logInfo("Flushed to blob storage {}", localFileMetadata.getFullPath()); + firehoseInstrumentation.incrementCounter(FILE_UPLOAD_TOTAL, SUCCESS_TAG); + firehoseInstrumentation.captureCount(FILE_UPLOAD_BYTES, localFileMetadata.getSize()); + firehoseInstrumentation.captureCount(FILE_UPLOAD_RECORDS_TOTAL, localFileMetadata.getRecordCount()); + firehoseInstrumentation.captureDuration(FILE_UPLOAD_TIME_MILLISECONDS, totalTime); } private void captureUploadFailedMetric(Throwable e) { - instrumentation.logError("Failed to flush to blob storage {}", e.getMessage()); + firehoseInstrumentation.logError("Failed to flush to blob storage {}", e.getMessage()); String errorType; if (e instanceof BlobStorageException) { errorType = ((BlobStorageException) e).getErrorType(); } else { errorType = ""; } - instrumentation.incrementCounter(FILE_UPLOAD_TOTAL, FAILURE_TAG, tag(BLOB_STORAGE_ERROR_TYPE_TAG, errorType)); + firehoseInstrumentation.incrementCounter(FILE_UPLOAD_TOTAL, FAILURE_TAG, tag(BLOB_STORAGE_ERROR_TYPE_TAG, errorType)); } } diff --git a/src/main/java/io/odpf/firehose/sink/common/AbstractHttpSink.java b/src/main/java/io/odpf/firehose/sink/common/AbstractHttpSink.java index e3e93ca30..501f71447 100644 --- a/src/main/java/io/odpf/firehose/sink/common/AbstractHttpSink.java +++ b/src/main/java/io/odpf/firehose/sink/common/AbstractHttpSink.java @@ -3,7 +3,7 @@ import io.odpf.firehose.message.Message; import io.odpf.firehose.exception.NeedToRetry; -import io.odpf.firehose.metrics.Instrumentation; +import io.odpf.firehose.metrics.FirehoseInstrumentation; import io.odpf.firehose.sink.AbstractSink; import io.odpf.stencil.client.StencilClient; import joptsimple.internal.Strings; @@ -35,8 +35,8 @@ public abstract class AbstractHttpSink extends AbstractSink { private final Map requestLogStatusCodeRanges; protected static final String SUCCESS_CODE_PATTERN = "^2.*"; - public AbstractHttpSink(Instrumentation instrumentation, String sinkType, HttpClient httpClient, StencilClient stencilClient, Map retryStatusCodeRanges, Map 
requestLogStatusCodeRanges) { - super(instrumentation, sinkType); + public AbstractHttpSink(FirehoseInstrumentation firehoseInstrumentation, String sinkType, HttpClient httpClient, StencilClient stencilClient, Map retryStatusCodeRanges, Map requestLogStatusCodeRanges) { + super(firehoseInstrumentation, sinkType); this.httpClient = httpClient; this.stencilClient = stencilClient; this.retryStatusCodeRanges = retryStatusCodeRanges; @@ -50,7 +50,7 @@ public List execute() throws Exception { try { response = httpClient.execute(httpRequest); List contentStringList = null; - getInstrumentation().logInfo("Response Status: {}", statusCode(response)); + getFirehoseInstrumentation().logInfo("Response Status: {}", statusCode(response)); if (shouldLogResponse(response)) { printResponse(response); } @@ -74,7 +74,7 @@ public List execute() throws Exception { @Override public void close() throws IOException { - getInstrumentation().logInfo("HTTP connection closing"); + getFirehoseInstrumentation().logInfo("HTTP connection closing"); getHttpRequests().clear(); getStencilClient().close(); } @@ -92,7 +92,7 @@ private boolean shouldLogRequest(HttpResponse response) { } private boolean shouldLogResponse(HttpResponse response) { - return getInstrumentation().isDebugEnabled() && response != null && response.getEntity() != null; + return getFirehoseInstrumentation().isDebugEnabled() && response != null && response.getEntity() != null; } private boolean shouldRetry(HttpResponse response) { @@ -111,7 +111,7 @@ protected String statusCode(HttpResponse response) { private void captureHttpStatusCount(HttpResponse response) { String statusCode = statusCode(response); String httpCodeTag = statusCode.equals("null") ? "status_code=" : "status_code=" + statusCode; - getInstrumentation().captureCount(SINK_HTTP_RESPONSE_CODE_TOTAL, 1, httpCodeTag); + getFirehoseInstrumentation().captureCount(SINK_HTTP_RESPONSE_CODE_TOTAL, 1L, httpCodeTag); } private void printRequest(HttpEntityEnclosingRequestBase httpRequest, List contentStringList) throws IOException { @@ -120,7 +120,7 @@ private void printRequest(HttpEntityEnclosingRequestBase httpRequest, List configuration, StatsDReporter KafkaProducer kafkaProducer = KafkaUtils.getKafkaProducer(dlqKafkaProducerConfig); TracingKafkaProducer tracingProducer = new TracingKafkaProducer<>(kafkaProducer, tracer); - return new KafkaDlqWriter(tracingProducer, dlqKafkaProducerConfig.getDlqKafkaTopic(), new Instrumentation(client, KafkaDlqWriter.class)); + return new KafkaDlqWriter(tracingProducer, dlqKafkaProducerConfig.getDlqKafkaTopic(), new FirehoseInstrumentation(client, KafkaDlqWriter.class)); case BLOB_STORAGE: switch (dlqConfig.getBlobStorageType()) { @@ -44,7 +44,7 @@ public static DlqWriter create(Map configuration, StatsDReporter BlobStorage blobStorage = BlobStorageFactory.createObjectStorage(dlqConfig.getBlobStorageType(), configuration); return new BlobStorageDlqWriter(blobStorage); case LOG: - return new LogDlqWriter(new Instrumentation(client, LogDlqWriter.class)); + return new LogDlqWriter(new FirehoseInstrumentation(client, LogDlqWriter.class)); default: throw new IllegalArgumentException("DLQ Writer type " + dlqConfig.getDlqWriterType() + " is not supported"); diff --git a/src/main/java/io/odpf/firehose/sink/dlq/kafka/KafkaDlqWriter.java b/src/main/java/io/odpf/firehose/sink/dlq/kafka/KafkaDlqWriter.java index eb59c22a1..3f7b72729 100644 --- a/src/main/java/io/odpf/firehose/sink/dlq/kafka/KafkaDlqWriter.java +++ 
b/src/main/java/io/odpf/firehose/sink/dlq/kafka/KafkaDlqWriter.java @@ -1,7 +1,7 @@ package io.odpf.firehose.sink.dlq.kafka; import io.odpf.firehose.message.Message; -import io.odpf.firehose.metrics.Instrumentation; +import io.odpf.firehose.metrics.FirehoseInstrumentation; import io.odpf.firehose.sink.dlq.DlqWriter; import org.apache.kafka.clients.producer.Producer; import org.apache.kafka.clients.producer.ProducerRecord; @@ -17,12 +17,12 @@ public class KafkaDlqWriter implements DlqWriter { private Producer kafkaProducer; private final String topic; - private Instrumentation instrumentation; + private FirehoseInstrumentation firehoseInstrumentation; - public KafkaDlqWriter(Producer kafkaProducer, String topic, Instrumentation instrumentation) { + public KafkaDlqWriter(Producer kafkaProducer, String topic, FirehoseInstrumentation firehoseInstrumentation) { this.kafkaProducer = kafkaProducer; this.topic = topic; - this.instrumentation = instrumentation; + this.firehoseInstrumentation = firehoseInstrumentation; } @Override @@ -34,7 +34,7 @@ public List write(List messages) throws IOException { AtomicInteger recordsProcessed = new AtomicInteger(); List failedMessages = new ArrayList<>(); - instrumentation.logInfo("Pushing {} messages to retry queue topic : {}", messages.size(), topic); + firehoseInstrumentation.logInfo("Pushing {} messages to retry queue topic : {}", messages.size(), topic); for (Message message : messages) { kafkaProducer.send(new ProducerRecord<>(topic, null, null, message.getLogKey(), message.getLogMessage(), message.getHeaders()), (metadata, e) -> { @@ -53,10 +53,10 @@ public List write(List messages) throws IOException { try { completedLatch.await(); } catch (InterruptedException e) { - instrumentation.logWarn(e.getMessage()); - instrumentation.captureNonFatalError(e); + firehoseInstrumentation.logWarn(e.getMessage()); + firehoseInstrumentation.captureNonFatalError("firehose_error_event", e, ""); } - instrumentation.logInfo("Successfully pushed {} messages to {}", messages.size() - failedMessages.size(), topic); + firehoseInstrumentation.logInfo("Successfully pushed {} messages to {}", messages.size() - failedMessages.size(), topic); return failedMessages; } } diff --git a/src/main/java/io/odpf/firehose/sink/dlq/log/LogDlqWriter.java b/src/main/java/io/odpf/firehose/sink/dlq/log/LogDlqWriter.java index bfcfb449a..595c71725 100644 --- a/src/main/java/io/odpf/firehose/sink/dlq/log/LogDlqWriter.java +++ b/src/main/java/io/odpf/firehose/sink/dlq/log/LogDlqWriter.java @@ -1,8 +1,8 @@ package io.odpf.firehose.sink.dlq.log; -import io.odpf.firehose.error.ErrorInfo; +import io.odpf.depot.error.ErrorInfo; import io.odpf.firehose.message.Message; -import io.odpf.firehose.metrics.Instrumentation; +import io.odpf.firehose.metrics.FirehoseInstrumentation; import io.odpf.firehose.sink.dlq.DlqWriter; import org.apache.commons.lang3.exception.ExceptionUtils; @@ -11,10 +11,10 @@ import java.util.List; public class LogDlqWriter implements DlqWriter { - private final Instrumentation instrumentation; + private final FirehoseInstrumentation firehoseInstrumentation; - public LogDlqWriter(Instrumentation instrumentation) { - this.instrumentation = instrumentation; + public LogDlqWriter(FirehoseInstrumentation firehoseInstrumentation) { + this.firehoseInstrumentation = firehoseInstrumentation; } @Override @@ -31,7 +31,7 @@ public List write(List messages) throws IOException { } } - instrumentation.logInfo("key: {}\nvalue: {}\nerror: {}", key, value, error); + 
firehoseInstrumentation.logInfo("key: {}\nvalue: {}\nerror: {}", key, value, error); } return new LinkedList<>(); } diff --git a/src/main/java/io/odpf/firehose/sink/elasticsearch/EsSink.java b/src/main/java/io/odpf/firehose/sink/elasticsearch/EsSink.java index e1f6a299c..2acacd0bc 100644 --- a/src/main/java/io/odpf/firehose/sink/elasticsearch/EsSink.java +++ b/src/main/java/io/odpf/firehose/sink/elasticsearch/EsSink.java @@ -2,7 +2,7 @@ import io.odpf.firehose.message.Message; import io.odpf.firehose.exception.NeedToRetry; -import io.odpf.firehose.metrics.Instrumentation; +import io.odpf.firehose.metrics.FirehoseInstrumentation; import io.odpf.firehose.sink.AbstractSink; import io.odpf.firehose.sink.elasticsearch.request.EsRequestHandler; import org.elasticsearch.action.bulk.BulkItemResponse; @@ -31,7 +31,7 @@ public class EsSink extends AbstractSink { /** * Instantiates a new Es sink. * - * @param instrumentation the instrumentation + * @param firehoseInstrumentation the instrumentation * @param sinkType the sink type * @param client the client * @param esRequestHandler the es request handler @@ -39,9 +39,9 @@ public class EsSink extends AbstractSink { * @param esWaitForActiveShardsCount the es wait for active shards count * @param esRetryStatusCodeBlacklist the es retry status code blacklist */ - public EsSink(Instrumentation instrumentation, String sinkType, RestHighLevelClient client, EsRequestHandler esRequestHandler, + public EsSink(FirehoseInstrumentation firehoseInstrumentation, String sinkType, RestHighLevelClient client, EsRequestHandler esRequestHandler, long esRequestTimeoutInMs, Integer esWaitForActiveShardsCount, List esRetryStatusCodeBlacklist) { - super(instrumentation, sinkType); + super(firehoseInstrumentation, sinkType); this.client = client; this.esRequestHandler = esRequestHandler; this.esRequestTimeoutInMs = esRequestTimeoutInMs; @@ -61,7 +61,7 @@ protected void prepare(List messages) { protected List execute() throws Exception { BulkResponse bulkResponse = getBulkResponse(); if (bulkResponse.hasFailures()) { - getInstrumentation().logWarn("Bulk request failed"); + getFirehoseInstrumentation().logWarn("Bulk request failed"); handleResponse(bulkResponse); } return new ArrayList<>(); @@ -69,7 +69,7 @@ protected List execute() throws Exception { @Override public void close() throws IOException { - getInstrumentation().logInfo("Elastic Search connection closing"); + getFirehoseInstrumentation().logInfo("Elastic Search connection closing"); this.client.close(); } @@ -84,14 +84,14 @@ private void handleResponse(BulkResponse bulkResponse) throws NeedToRetry { failedResponseCount++; String responseStatus = String.valueOf(response.status().getStatus()); if (esRetryStatusCodeBlacklist.contains(responseStatus)) { - getInstrumentation().logInfo("Not retrying due to response status: {} is under blacklisted status code", responseStatus); - getInstrumentation().incrementCounter(SINK_MESSAGES_DROP_TOTAL, "cause=" + response.status().name()); - getInstrumentation().logInfo("Message dropped because of status code: " + responseStatus); + getFirehoseInstrumentation().logInfo("Not retrying due to response status: {} is under blacklisted status code", responseStatus); + getFirehoseInstrumentation().incrementCounter(SINK_MESSAGES_DROP_TOTAL, "cause=" + response.status().name()); + getFirehoseInstrumentation().logInfo("Message dropped because of status code: " + responseStatus); } else { throw new NeedToRetry(bulkResponse.buildFailureMessage()); } } } - getInstrumentation().logWarn("Bulk 
request failed count: {}", failedResponseCount); + getFirehoseInstrumentation().logWarn("Bulk request failed count: {}", failedResponseCount); } } diff --git a/src/main/java/io/odpf/firehose/sink/elasticsearch/EsSinkFactory.java b/src/main/java/io/odpf/firehose/sink/elasticsearch/EsSinkFactory.java index 873e9e1b0..a0d9b6804 100644 --- a/src/main/java/io/odpf/firehose/sink/elasticsearch/EsSinkFactory.java +++ b/src/main/java/io/odpf/firehose/sink/elasticsearch/EsSinkFactory.java @@ -1,10 +1,10 @@ package io.odpf.firehose.sink.elasticsearch; +import io.odpf.depot.metrics.StatsDReporter; import io.odpf.firehose.config.EsSinkConfig; import io.odpf.firehose.config.enums.SinkType; -import io.odpf.firehose.metrics.Instrumentation; -import io.odpf.firehose.metrics.StatsDReporter; +import io.odpf.firehose.metrics.FirehoseInstrumentation; import io.odpf.firehose.serializer.MessageToJson; import io.odpf.firehose.sink.Sink; import io.odpf.firehose.sink.elasticsearch.request.EsRequestHandler; @@ -36,7 +36,7 @@ public class EsSinkFactory { public static Sink create(Map configuration, StatsDReporter statsDReporter, StencilClient stencilClient) { EsSinkConfig esSinkConfig = ConfigFactory.create(EsSinkConfig.class, configuration); - Instrumentation instrumentation = new Instrumentation(statsDReporter, EsSinkFactory.class); + FirehoseInstrumentation firehoseInstrumentation = new FirehoseInstrumentation(statsDReporter, EsSinkFactory.class); String esConfig = String.format("\n\tES connection urls: %s\n\tES index name: %s\n\tES id field: %s\n\tES message type: %s" + "\n\tES type name: %s\n\tES request timeout in ms: %s\n\tES retry status code blacklist: %s" + "\n\tES routing key name: %s\n\tES wait for active shards count: %s\n\tES update only mode: %s" @@ -45,8 +45,8 @@ public static Sink create(Map configuration, StatsDReporter stat esSinkConfig.getSinkEsTypeName(), esSinkConfig.getSinkEsRequestTimeoutMs(), esSinkConfig.getSinkEsRetryStatusCodeBlacklist(), esSinkConfig.getSinkEsRoutingKeyName(), esSinkConfig.getSinkEsShardsActiveWaitCount(), esSinkConfig.isSinkEsModeUpdateOnlyEnable(), esSinkConfig.isSinkEsPreserveProtoFieldNamesEnable()); - instrumentation.logDebug(esConfig); - EsRequestHandler esRequestHandler = new EsRequestHandlerFactory(esSinkConfig, new Instrumentation(statsDReporter, EsRequestHandlerFactory.class), + firehoseInstrumentation.logDebug(esConfig); + EsRequestHandler esRequestHandler = new EsRequestHandlerFactory(esSinkConfig, new FirehoseInstrumentation(statsDReporter, EsRequestHandlerFactory.class), esSinkConfig.getSinkEsIdField(), esSinkConfig.getSinkEsInputMessageType(), new MessageToJson(stencilClient.getParser(esSinkConfig.getInputSchemaProtoClass()), esSinkConfig.isSinkEsPreserveProtoFieldNamesEnable(), false), esSinkConfig.getSinkEsTypeName(), @@ -54,14 +54,14 @@ public static Sink create(Map configuration, StatsDReporter stat esSinkConfig.getSinkEsRoutingKeyName()) .getRequestHandler(); - HttpHost[] httpHosts = getHttpHosts(esSinkConfig.getSinkEsConnectionUrls(), instrumentation); + HttpHost[] httpHosts = getHttpHosts(esSinkConfig.getSinkEsConnectionUrls(), firehoseInstrumentation); RestHighLevelClient client = new RestHighLevelClient(RestClient.builder(httpHosts)); - instrumentation.logInfo("ES connection established"); - return new EsSink(new Instrumentation(statsDReporter, EsSink.class), SinkType.ELASTICSEARCH.name().toLowerCase(), client, esRequestHandler, + firehoseInstrumentation.logInfo("ES connection established"); + return new EsSink(new 
FirehoseInstrumentation(statsDReporter, EsSink.class), SinkType.ELASTICSEARCH.name().toLowerCase(), client, esRequestHandler, esSinkConfig.getSinkEsRequestTimeoutMs(), esSinkConfig.getSinkEsShardsActiveWaitCount(), getStatusCodesAsList(esSinkConfig.getSinkEsRetryStatusCodeBlacklist())); } - protected static HttpHost[] getHttpHosts(String esConnectionUrls, Instrumentation instrumentation) { + protected static HttpHost[] getHttpHosts(String esConnectionUrls, FirehoseInstrumentation firehoseInstrumentation) { if (esConnectionUrls != null && !esConnectionUrls.isEmpty()) { String[] esNodes = esConnectionUrls.trim().split(","); HttpHost[] httpHosts = new HttpHost[esNodes.length]; @@ -74,7 +74,7 @@ protected static HttpHost[] getHttpHosts(String esConnectionUrls, Instrumentatio } return httpHosts; } else { - instrumentation.logError("No connection URL found"); + firehoseInstrumentation.logError("No connection URL found"); throw new IllegalArgumentException("SINK_ES_CONNECTION_URLS is empty or null"); } } diff --git a/src/main/java/io/odpf/firehose/sink/elasticsearch/request/EsRequestHandlerFactory.java b/src/main/java/io/odpf/firehose/sink/elasticsearch/request/EsRequestHandlerFactory.java index 0d94e4abe..94a687d99 100644 --- a/src/main/java/io/odpf/firehose/sink/elasticsearch/request/EsRequestHandlerFactory.java +++ b/src/main/java/io/odpf/firehose/sink/elasticsearch/request/EsRequestHandlerFactory.java @@ -3,7 +3,7 @@ import io.odpf.firehose.config.EsSinkConfig; import io.odpf.firehose.config.enums.EsSinkMessageType; import io.odpf.firehose.config.enums.EsSinkRequestType; -import io.odpf.firehose.metrics.Instrumentation; +import io.odpf.firehose.metrics.FirehoseInstrumentation; import io.odpf.firehose.serializer.MessageToJson; import lombok.AllArgsConstructor; @@ -16,7 +16,7 @@ public class EsRequestHandlerFactory { private EsSinkConfig esSinkConfig; - private Instrumentation instrumentation; + private FirehoseInstrumentation firehoseInstrumentation; private final String esIdFieldName; private final EsSinkMessageType messageType; private final MessageToJson jsonSerializer; @@ -26,7 +26,7 @@ public class EsRequestHandlerFactory { public EsRequestHandler getRequestHandler() { EsSinkRequestType esSinkRequestType = esSinkConfig.isSinkEsModeUpdateOnlyEnable() ? 
UPDATE_ONLY : INSERT_OR_UPDATE; - instrumentation.logInfo("ES request mode: {}", esSinkRequestType); + firehoseInstrumentation.logInfo("ES request mode: {}", esSinkRequestType); ArrayList esRequestHandlers = new ArrayList<>(); esRequestHandlers.add(new EsUpdateRequestHandler(messageType, jsonSerializer, esTypeName, esIndexName, esSinkRequestType, esIdFieldName, esRoutingKeyName)); diff --git a/src/main/java/io/odpf/firehose/sink/grpc/GrpcSink.java b/src/main/java/io/odpf/firehose/sink/grpc/GrpcSink.java index 10b87cfae..055d7a0f7 100644 --- a/src/main/java/io/odpf/firehose/sink/grpc/GrpcSink.java +++ b/src/main/java/io/odpf/firehose/sink/grpc/GrpcSink.java @@ -4,7 +4,7 @@ import io.odpf.firehose.message.Message; import io.odpf.firehose.exception.DeserializerException; -import io.odpf.firehose.metrics.Instrumentation; +import io.odpf.firehose.metrics.FirehoseInstrumentation; import io.odpf.firehose.sink.AbstractSink; import io.odpf.firehose.sink.grpc.client.GrpcClient; import com.google.protobuf.DynamicMessage; @@ -24,8 +24,8 @@ public class GrpcSink extends AbstractSink { private List messages; private StencilClient stencilClient; - public GrpcSink(Instrumentation instrumentation, GrpcClient grpcClient, StencilClient stencilClient) { - super(instrumentation, "grpc"); + public GrpcSink(FirehoseInstrumentation firehoseInstrumentation, GrpcClient grpcClient, StencilClient stencilClient) { + super(firehoseInstrumentation, "grpc"); this.grpcClient = grpcClient; this.stencilClient = stencilClient; } @@ -36,16 +36,16 @@ protected List execute() throws Exception { for (Message message : this.messages) { DynamicMessage response = grpcClient.execute(message.getLogMessage(), message.getHeaders()); - getInstrumentation().logDebug("Response: {}", response); + getFirehoseInstrumentation().logDebug("Response: {}", response); Object m = response.getField(response.getDescriptorForType().findFieldByName("success")); boolean success = (m != null) ? 
Boolean.valueOf(String.valueOf(m)) : false; if (!success) { - getInstrumentation().logWarn("Grpc Service returned error"); + getFirehoseInstrumentation().logWarn("Grpc Service returned error"); failedMessages.add(message); } } - getInstrumentation().logDebug("Failed messages count: {}", failedMessages.size()); + getFirehoseInstrumentation().logDebug("Failed messages count: {}", failedMessages.size()); return failedMessages; } @@ -56,7 +56,7 @@ protected void prepare(List messages2) throws DeserializerException { @Override public void close() throws IOException { - getInstrumentation().logInfo("GRPC connection closing"); + getFirehoseInstrumentation().logInfo("GRPC connection closing"); this.messages = new ArrayList<>(); stencilClient.close(); } diff --git a/src/main/java/io/odpf/firehose/sink/grpc/GrpcSinkFactory.java b/src/main/java/io/odpf/firehose/sink/grpc/GrpcSinkFactory.java index b08ef609b..023862730 100644 --- a/src/main/java/io/odpf/firehose/sink/grpc/GrpcSinkFactory.java +++ b/src/main/java/io/odpf/firehose/sink/grpc/GrpcSinkFactory.java @@ -1,9 +1,9 @@ package io.odpf.firehose.sink.grpc; +import io.odpf.depot.metrics.StatsDReporter; import io.odpf.firehose.config.GrpcSinkConfig; -import io.odpf.firehose.metrics.Instrumentation; -import io.odpf.firehose.metrics.StatsDReporter; +import io.odpf.firehose.metrics.FirehoseInstrumentation; import io.odpf.firehose.sink.AbstractSink; import io.odpf.firehose.sink.grpc.client.GrpcClient; import io.grpc.ManagedChannel; @@ -24,17 +24,17 @@ public class GrpcSinkFactory { public static AbstractSink create(Map configuration, StatsDReporter statsDReporter, StencilClient stencilClient) { GrpcSinkConfig grpcConfig = ConfigFactory.create(GrpcSinkConfig.class, configuration); - Instrumentation instrumentation = new Instrumentation(statsDReporter, GrpcSinkFactory.class); + FirehoseInstrumentation firehoseInstrumentation = new FirehoseInstrumentation(statsDReporter, GrpcSinkFactory.class); String grpcSinkConfig = String.format("\n\tService host: %s\n\tService port: %s\n\tMethod url: %s\n\tResponse proto schema: %s", grpcConfig.getSinkGrpcServiceHost(), grpcConfig.getSinkGrpcServicePort(), grpcConfig.getSinkGrpcMethodUrl(), grpcConfig.getSinkGrpcResponseSchemaProtoClass()); - instrumentation.logDebug(grpcSinkConfig); + firehoseInstrumentation.logDebug(grpcSinkConfig); ManagedChannel managedChannel = ManagedChannelBuilder.forAddress(grpcConfig.getSinkGrpcServiceHost(), grpcConfig.getSinkGrpcServicePort()).usePlaintext().build(); - GrpcClient grpcClient = new GrpcClient(new Instrumentation(statsDReporter, GrpcClient.class), grpcConfig, managedChannel, stencilClient); - instrumentation.logInfo("GRPC connection established"); + GrpcClient grpcClient = new GrpcClient(new FirehoseInstrumentation(statsDReporter, GrpcClient.class), grpcConfig, managedChannel, stencilClient); + firehoseInstrumentation.logInfo("GRPC connection established"); - return new GrpcSink(new Instrumentation(statsDReporter, GrpcSink.class), grpcClient, stencilClient); + return new GrpcSink(new FirehoseInstrumentation(statsDReporter, GrpcSink.class), grpcClient, stencilClient); } } diff --git a/src/main/java/io/odpf/firehose/sink/grpc/client/GrpcClient.java b/src/main/java/io/odpf/firehose/sink/grpc/client/GrpcClient.java index d15fd32f6..bf4c5971d 100644 --- a/src/main/java/io/odpf/firehose/sink/grpc/client/GrpcClient.java +++ b/src/main/java/io/odpf/firehose/sink/grpc/client/GrpcClient.java @@ -3,7 +3,7 @@ import io.odpf.firehose.config.GrpcSinkConfig; -import 
io.odpf.firehose.metrics.Instrumentation; +import io.odpf.firehose.metrics.FirehoseInstrumentation; import com.google.protobuf.DynamicMessage; import io.grpc.ManagedChannel; @@ -29,13 +29,13 @@ */ public class GrpcClient { - private Instrumentation instrumentation; + private FirehoseInstrumentation firehoseInstrumentation; private final GrpcSinkConfig grpcSinkConfig; private StencilClient stencilClient; private ManagedChannel managedChannel; - public GrpcClient(Instrumentation instrumentation, GrpcSinkConfig grpcSinkConfig, ManagedChannel managedChannel, StencilClient stencilClient) { - this.instrumentation = instrumentation; + public GrpcClient(FirehoseInstrumentation firehoseInstrumentation, GrpcSinkConfig grpcSinkConfig, ManagedChannel managedChannel, StencilClient stencilClient) { + this.firehoseInstrumentation = firehoseInstrumentation; this.grpcSinkConfig = grpcSinkConfig; this.stencilClient = stencilClient; this.managedChannel = managedChannel; @@ -68,7 +68,7 @@ public DynamicMessage execute(byte[] logMessage, Headers headers) { dynamicMessage = stencilClient.parse(grpcSinkConfig.getSinkGrpcResponseSchemaProtoClass(), response); } catch (Exception e) { - instrumentation.logWarn(e.getMessage()); + firehoseInstrumentation.logWarn(e.getMessage()); dynamicMessage = DynamicMessage.newBuilder(this.stencilClient.get(this.grpcSinkConfig.getSinkGrpcResponseSchemaProtoClass())).build(); } diff --git a/src/main/java/io/odpf/firehose/sink/http/HttpSink.java b/src/main/java/io/odpf/firehose/sink/http/HttpSink.java index 01212d623..330477fb4 100644 --- a/src/main/java/io/odpf/firehose/sink/http/HttpSink.java +++ b/src/main/java/io/odpf/firehose/sink/http/HttpSink.java @@ -3,7 +3,7 @@ import io.odpf.firehose.message.Message; import io.odpf.firehose.exception.DeserializerException; -import io.odpf.firehose.metrics.Instrumentation; +import io.odpf.firehose.metrics.FirehoseInstrumentation; import io.odpf.firehose.sink.common.AbstractHttpSink; import io.odpf.firehose.sink.http.request.types.Request; import io.odpf.stencil.client.StencilClient; @@ -35,15 +35,15 @@ public class HttpSink extends AbstractHttpSink { /** * Instantiates a new Http sink. 
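The captureMessageDropCount change in HttpSink further below now passes an explicit long to captureCount. The drop count itself is estimated by stripping the JSON array brackets from the batched request body and splitting on the boundaries between serialized objects; a minimal, self-contained sketch of that counting logic, with an illustrative payload and class name:

import java.util.Arrays;
import java.util.List;

public class DropCountDemo {
    public static void main(String[] args) {
        // A batched HTTP request body is a JSON array of serialized messages.
        String requestBody = "[{\"id\":1},{\"id\":2},{\"id\":3}]";
        // Remove the leading "[" and trailing "]", then split on "},{" boundaries
        // to approximate how many messages the failed batch carried.
        List<String> result = Arrays.asList(requestBody.replaceAll("^\\[|]$", "").split("},\\s*\\{"));
        System.out.println(result.size()); // prints 3
    }
}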
* - * @param instrumentation the instrumentation + * @param firehoseInstrumentation the instrumentation * @param request the request * @param httpClient the http client * @param stencilClient the stencil client * @param retryStatusCodeRanges the retry status code ranges * @param requestLogStatusCodeRanges the request log status code ranges */ - public HttpSink(Instrumentation instrumentation, Request request, HttpClient httpClient, StencilClient stencilClient, Map retryStatusCodeRanges, Map requestLogStatusCodeRanges) { - super(instrumentation, "http", httpClient, stencilClient, retryStatusCodeRanges, requestLogStatusCodeRanges); + public HttpSink(FirehoseInstrumentation firehoseInstrumentation, Request request, HttpClient httpClient, StencilClient stencilClient, Map retryStatusCodeRanges, Map requestLogStatusCodeRanges) { + super(firehoseInstrumentation, "http", httpClient, stencilClient, retryStatusCodeRanges, requestLogStatusCodeRanges); this.request = request; } @@ -68,7 +68,7 @@ protected void captureMessageDropCount(HttpResponse response, List conte List result = Arrays.asList(requestBody.replaceAll("^\\[|]$", "").split("},\\s*\\{")); - getInstrumentation().captureCount(SINK_MESSAGES_DROP_TOTAL, result.size(), "cause= " + statusCode(response)); - getInstrumentation().logInfo("Message dropped because of status code: " + statusCode(response)); + getFirehoseInstrumentation().captureCount(SINK_MESSAGES_DROP_TOTAL, (long) result.size(), "cause= " + statusCode(response)); + getFirehoseInstrumentation().logInfo("Message dropped because of status code: " + statusCode(response)); } } diff --git a/src/main/java/io/odpf/firehose/sink/http/HttpSinkFactory.java b/src/main/java/io/odpf/firehose/sink/http/HttpSinkFactory.java index 658f926a9..3f6c8d964 100644 --- a/src/main/java/io/odpf/firehose/sink/http/HttpSinkFactory.java +++ b/src/main/java/io/odpf/firehose/sink/http/HttpSinkFactory.java @@ -1,9 +1,9 @@ package io.odpf.firehose.sink.http; +import io.odpf.depot.metrics.StatsDReporter; import io.odpf.firehose.config.HttpSinkConfig; -import io.odpf.firehose.metrics.Instrumentation; -import io.odpf.firehose.metrics.StatsDReporter; +import io.odpf.firehose.metrics.FirehoseInstrumentation; import io.odpf.firehose.sink.AbstractSink; import io.odpf.firehose.sink.http.auth.OAuth2Credential; import io.odpf.firehose.sink.http.request.types.Request; @@ -38,16 +38,16 @@ public class HttpSinkFactory { public static AbstractSink create(Map configuration, StatsDReporter statsDReporter, StencilClient stencilClient) { HttpSinkConfig httpSinkConfig = ConfigFactory.create(HttpSinkConfig.class, configuration); - Instrumentation instrumentation = new Instrumentation(statsDReporter, HttpSinkFactory.class); + FirehoseInstrumentation firehoseInstrumentation = new FirehoseInstrumentation(statsDReporter, HttpSinkFactory.class); CloseableHttpClient closeableHttpClient = newHttpClient(httpSinkConfig, statsDReporter); - instrumentation.logInfo("HTTP connection established"); + firehoseInstrumentation.logInfo("HTTP connection established"); UriParser uriParser = new UriParser(stencilClient.getParser(httpSinkConfig.getInputSchemaProtoClass()), httpSinkConfig.getKafkaRecordParserMode()); Request request = new RequestFactory(statsDReporter, httpSinkConfig, stencilClient, uriParser).createRequest(); - return new HttpSink(new Instrumentation(statsDReporter, HttpSink.class), request, closeableHttpClient, stencilClient, httpSinkConfig.getSinkHttpRetryStatusCodeRanges(), httpSinkConfig.getSinkHttpRequestLogStatusCodeRanges()); + 
return new HttpSink(new FirehoseInstrumentation(statsDReporter, HttpSink.class), request, closeableHttpClient, stencilClient, httpSinkConfig.getSinkHttpRetryStatusCodeRanges(), httpSinkConfig.getSinkHttpRequestLogStatusCodeRanges()); } private static CloseableHttpClient newHttpClient(HttpSinkConfig httpSinkConfig, StatsDReporter statsDReporter) { @@ -61,7 +61,7 @@ private static CloseableHttpClient newHttpClient(HttpSinkConfig httpSinkConfig, HttpClientBuilder builder = HttpClients.custom().setConnectionManager(connectionManager).setDefaultRequestConfig(requestConfig); if (httpSinkConfig.isSinkHttpOAuth2Enable()) { OAuth2Credential oauth2 = new OAuth2Credential( - new Instrumentation(statsDReporter, OAuth2Credential.class), + new FirehoseInstrumentation(statsDReporter, OAuth2Credential.class), httpSinkConfig.getSinkHttpOAuth2ClientName(), httpSinkConfig.getSinkHttpOAuth2ClientSecret(), httpSinkConfig.getSinkHttpOAuth2Scope(), diff --git a/src/main/java/io/odpf/firehose/sink/http/auth/OAuth2Credential.java b/src/main/java/io/odpf/firehose/sink/http/auth/OAuth2Credential.java index b41139502..461327f91 100644 --- a/src/main/java/io/odpf/firehose/sink/http/auth/OAuth2Credential.java +++ b/src/main/java/io/odpf/firehose/sink/http/auth/OAuth2Credential.java @@ -1,6 +1,6 @@ package io.odpf.firehose.sink.http.auth; -import io.odpf.firehose.metrics.Instrumentation; +import io.odpf.firehose.metrics.FirehoseInstrumentation; import okhttp3.Interceptor; import okhttp3.Request; import okhttp3.Response; @@ -18,15 +18,15 @@ public class OAuth2Credential implements Interceptor { private final OAuth2Client client; private OAuth2AccessToken accessToken; - private Instrumentation instrumentation; + private FirehoseInstrumentation firehoseInstrumentation; - public OAuth2Credential(Instrumentation instrumentation, String clientId, String clientSecret, String scope, String accessTokenEndpoint) { - this.instrumentation = instrumentation; + public OAuth2Credential(FirehoseInstrumentation firehoseInstrumentation, String clientId, String clientSecret, String scope, String accessTokenEndpoint) { + this.firehoseInstrumentation = firehoseInstrumentation; this.client = new OAuth2Client(clientId, clientSecret, scope, accessTokenEndpoint); } public void requestAccessToken() throws IOException { - instrumentation.logInfo("Requesting Access Token, expires in: {0}", + firehoseInstrumentation.logInfo("Requesting Access Token, expires in: {0}", (this.accessToken == null ? 
"" : this.accessToken.getExpiresIn())); OAuth2AccessToken token = client.requestClientCredentialsGrantAccessToken(); setAccessToken(token); @@ -40,7 +40,7 @@ public HttpRequestInterceptor requestInterceptor() { } request.addHeader("Authorization", "Bearer " + getAccessToken().toString()); } catch (IOException e) { - instrumentation.logWarn("OAuth2 request access token failed: {0}", e.getMessage()); + firehoseInstrumentation.logWarn("OAuth2 request access token failed: {0}", e.getMessage()); } }; } @@ -75,7 +75,7 @@ public Response intercept(Chain chain) throws IOException { } request = request.newBuilder().header("Authorization", "Bearer " + getAccessToken().toString()).build(); } catch (IOException e) { - instrumentation.logWarn("OAuth2 request access token failed: {0}", e.getMessage()); + firehoseInstrumentation.logWarn("OAuth2 request access token failed: {0}", e.getMessage()); } Response response = chain.proceed(request); diff --git a/src/main/java/io/odpf/firehose/sink/http/factory/SerializerFactory.java b/src/main/java/io/odpf/firehose/sink/http/factory/SerializerFactory.java index d81032d11..9c7db35bf 100644 --- a/src/main/java/io/odpf/firehose/sink/http/factory/SerializerFactory.java +++ b/src/main/java/io/odpf/firehose/sink/http/factory/SerializerFactory.java @@ -1,11 +1,9 @@ package io.odpf.firehose.sink.http.factory; - - +import io.odpf.depot.metrics.StatsDReporter; import io.odpf.firehose.config.HttpSinkConfig; import io.odpf.firehose.config.enums.HttpSinkDataFormatType; -import io.odpf.firehose.metrics.Instrumentation; -import io.odpf.firehose.metrics.StatsDReporter; +import io.odpf.firehose.metrics.FirehoseInstrumentation; import io.odpf.firehose.serializer.MessageSerializer; import io.odpf.firehose.serializer.MessageToJson; import io.odpf.firehose.serializer.MessageToTemplatizedJson; @@ -25,9 +23,9 @@ public class SerializerFactory { private StatsDReporter statsDReporter; public MessageSerializer build() { - Instrumentation instrumentation = new Instrumentation(statsDReporter, SerializerFactory.class); + FirehoseInstrumentation firehoseInstrumentation = new FirehoseInstrumentation(statsDReporter, SerializerFactory.class); if (isProtoSchemaEmpty() || httpSinkConfig.getSinkHttpDataFormat() == HttpSinkDataFormatType.PROTO) { - instrumentation.logDebug("Serializer type: JsonWrappedProtoByte"); + firehoseInstrumentation.logDebug("Serializer type: JsonWrappedProtoByte"); // Fallback to json wrapped proto byte return new JsonWrappedProtoByte(); } @@ -35,18 +33,18 @@ public MessageSerializer build() { if (httpSinkConfig.getSinkHttpDataFormat() == HttpSinkDataFormatType.JSON) { Parser protoParser = stencilClient.getParser(httpSinkConfig.getInputSchemaProtoClass()); if (httpSinkConfig.getSinkHttpJsonBodyTemplate().isEmpty()) { - instrumentation.logDebug("Serializer type: EsbMessageToJson", HttpSinkDataFormatType.JSON); + firehoseInstrumentation.logDebug("Serializer type: EsbMessageToJson", HttpSinkDataFormatType.JSON); return new MessageToJson(protoParser, false, true); } else { - instrumentation.logDebug("Serializer type: EsbMessageToTemplatizedJson"); - return MessageToTemplatizedJson.create(new Instrumentation(statsDReporter, MessageToTemplatizedJson.class), httpSinkConfig.getSinkHttpJsonBodyTemplate(), protoParser); + firehoseInstrumentation.logDebug("Serializer type: EsbMessageToTemplatizedJson"); + return MessageToTemplatizedJson.create(new FirehoseInstrumentation(statsDReporter, MessageToTemplatizedJson.class), httpSinkConfig.getSinkHttpJsonBodyTemplate(), protoParser); } } // 
Ideally this code will never be executed because getHttpSinkDataFormat() will return proto as default value. // This is required to satisfy compilation. - instrumentation.logDebug("Serializer type: JsonWrappedProtoByte"); + firehoseInstrumentation.logDebug("Serializer type: JsonWrappedProtoByte"); return new JsonWrappedProtoByte(); } diff --git a/src/main/java/io/odpf/firehose/sink/http/request/RequestFactory.java b/src/main/java/io/odpf/firehose/sink/http/request/RequestFactory.java index 57a367e44..b90360432 100644 --- a/src/main/java/io/odpf/firehose/sink/http/request/RequestFactory.java +++ b/src/main/java/io/odpf/firehose/sink/http/request/RequestFactory.java @@ -1,11 +1,10 @@ package io.odpf.firehose.sink.http.request; - +import io.odpf.depot.metrics.StatsDReporter; import io.odpf.firehose.config.HttpSinkConfig; import io.odpf.firehose.config.enums.HttpSinkRequestMethodType; -import io.odpf.firehose.metrics.Instrumentation; -import io.odpf.firehose.metrics.StatsDReporter; +import io.odpf.firehose.metrics.FirehoseInstrumentation; import io.odpf.firehose.proto.ProtoToFieldMapper; import io.odpf.firehose.serializer.MessageSerializer; import io.odpf.firehose.sink.http.factory.SerializerFactory; @@ -34,7 +33,7 @@ public class RequestFactory { private UriParser uriParser; private StencilClient stencilClient; private StatsDReporter statsDReporter; - private Instrumentation instrumentation; + private FirehoseInstrumentation firehoseInstrumentation; /** * Instantiates a new Request factory. @@ -49,7 +48,7 @@ public RequestFactory(StatsDReporter statsDReporter, HttpSinkConfig httpSinkConf this.stencilClient = stencilClient; this.httpSinkConfig = httpSinkConfig; this.uriParser = uriParser; - instrumentation = new Instrumentation(this.statsDReporter, RequestFactory.class); + firehoseInstrumentation = new FirehoseInstrumentation(this.statsDReporter, RequestFactory.class); } public Request createRequest() { @@ -69,7 +68,7 @@ public Request createRequest() { .filter(Request::canProcess) .findFirst() .orElse(new SimpleRequest(statsDReporter, httpSinkConfig, body, httpSinkRequestMethodType)); - instrumentation.logInfo("Request type: {}", request.getClass()); + firehoseInstrumentation.logInfo("Request type: {}", request.getClass()); return request.setRequestStrategy(headerBuilder, uriBuilder, requestEntityBuilder); } diff --git a/src/main/java/io/odpf/firehose/sink/http/request/create/BatchRequestCreator.java b/src/main/java/io/odpf/firehose/sink/http/request/create/BatchRequestCreator.java index e8711abbc..c6aafad40 100644 --- a/src/main/java/io/odpf/firehose/sink/http/request/create/BatchRequestCreator.java +++ b/src/main/java/io/odpf/firehose/sink/http/request/create/BatchRequestCreator.java @@ -2,7 +2,7 @@ import io.odpf.firehose.config.enums.HttpSinkRequestMethodType; import io.odpf.firehose.message.Message; -import io.odpf.firehose.metrics.Instrumentation; +import io.odpf.firehose.metrics.FirehoseInstrumentation; import io.odpf.firehose.sink.http.request.HttpRequestMethodFactory; import io.odpf.firehose.sink.http.request.body.JsonBody; import io.odpf.firehose.sink.http.request.entity.RequestEntityBuilder; @@ -22,14 +22,14 @@ public class BatchRequestCreator implements RequestCreator { private HeaderBuilder headerBuilder; private HttpSinkRequestMethodType method; private JsonBody jsonBody; - private Instrumentation instrumentation; + private FirehoseInstrumentation firehoseInstrumentation; - public BatchRequestCreator(Instrumentation instrumentation, UriBuilder uriBuilder, HeaderBuilder 
headerBuilder, HttpSinkRequestMethodType method, JsonBody jsonBody) { + public BatchRequestCreator(FirehoseInstrumentation firehoseInstrumentation, UriBuilder uriBuilder, HeaderBuilder headerBuilder, HttpSinkRequestMethodType method, JsonBody jsonBody) { this.uriBuilder = uriBuilder; this.headerBuilder = headerBuilder; this.method = method; this.jsonBody = jsonBody; - this.instrumentation = instrumentation; + this.firehoseInstrumentation = firehoseInstrumentation; } @Override @@ -43,7 +43,7 @@ public List create(List messages, Reque String messagesString = jsonBody.serialize(messages).toString(); request.setEntity(requestEntityBuilder.buildHttpEntity(messagesString)); - instrumentation.logDebug("\nRequest URL: {}\nRequest headers: {}\nRequest content: {}\nRequest method: {}", + firehoseInstrumentation.logDebug("\nRequest URL: {}\nRequest headers: {}\nRequest content: {}\nRequest method: {}", uri, headerMap, jsonBody.serialize(messages), method); return Collections.singletonList(request); } diff --git a/src/main/java/io/odpf/firehose/sink/http/request/create/IndividualRequestCreator.java b/src/main/java/io/odpf/firehose/sink/http/request/create/IndividualRequestCreator.java index 648a54f74..008457495 100644 --- a/src/main/java/io/odpf/firehose/sink/http/request/create/IndividualRequestCreator.java +++ b/src/main/java/io/odpf/firehose/sink/http/request/create/IndividualRequestCreator.java @@ -2,7 +2,7 @@ import io.odpf.firehose.config.enums.HttpSinkRequestMethodType; import io.odpf.firehose.message.Message; -import io.odpf.firehose.metrics.Instrumentation; +import io.odpf.firehose.metrics.FirehoseInstrumentation; import io.odpf.firehose.sink.http.request.HttpRequestMethodFactory; import io.odpf.firehose.sink.http.request.body.JsonBody; import io.odpf.firehose.sink.http.request.entity.RequestEntityBuilder; @@ -22,14 +22,14 @@ public class IndividualRequestCreator implements RequestCreator { private JsonBody jsonBody; private HttpSinkRequestMethodType method; private UriBuilder uriBuilder; - private Instrumentation instrumentation; + private FirehoseInstrumentation firehoseInstrumentation; - public IndividualRequestCreator(Instrumentation instrumentation, UriBuilder uriBuilder, HeaderBuilder headerBuilder, HttpSinkRequestMethodType method, JsonBody body) { + public IndividualRequestCreator(FirehoseInstrumentation firehoseInstrumentation, UriBuilder uriBuilder, HeaderBuilder headerBuilder, HttpSinkRequestMethodType method, JsonBody body) { this.uriBuilder = uriBuilder; this.headerBuilder = headerBuilder; this.jsonBody = body; this.method = method; - this.instrumentation = instrumentation; + this.firehoseInstrumentation = firehoseInstrumentation; } @Override @@ -45,7 +45,7 @@ public List create(List messages, Reque headerMap.forEach(request::addHeader); request.setEntity(entity.buildHttpEntity(bodyContents.get(i))); - instrumentation.logDebug("\nRequest URL: {}\nRequest headers: {}\nRequest content: {}\nRequest method: {}", + firehoseInstrumentation.logDebug("\nRequest URL: {}\nRequest headers: {}\nRequest content: {}\nRequest method: {}", requestUrl, headerMap, bodyContents.get(i), method); requests.add(request); diff --git a/src/main/java/io/odpf/firehose/sink/http/request/types/DynamicUrlRequest.java b/src/main/java/io/odpf/firehose/sink/http/request/types/DynamicUrlRequest.java index e464eaa15..162fc1947 100644 --- a/src/main/java/io/odpf/firehose/sink/http/request/types/DynamicUrlRequest.java +++ b/src/main/java/io/odpf/firehose/sink/http/request/types/DynamicUrlRequest.java @@ -1,12 +1,12 
@@ package io.odpf.firehose.sink.http.request.types; +import io.odpf.depot.metrics.StatsDReporter; import io.odpf.firehose.config.HttpSinkConfig; import io.odpf.firehose.config.enums.HttpSinkRequestMethodType; import io.odpf.firehose.config.enums.HttpSinkParameterSourceType; import io.odpf.firehose.message.Message; import io.odpf.firehose.exception.DeserializerException; -import io.odpf.firehose.metrics.Instrumentation; -import io.odpf.firehose.metrics.StatsDReporter; +import io.odpf.firehose.metrics.FirehoseInstrumentation; import io.odpf.firehose.sink.http.request.body.JsonBody; import io.odpf.firehose.sink.http.request.create.IndividualRequestCreator; import io.odpf.firehose.sink.http.request.create.RequestCreator; @@ -60,7 +60,7 @@ public List build(List messages) throws @Override public Request setRequestStrategy(HeaderBuilder headerBuilder, UriBuilder uriBuilder, RequestEntityBuilder requestEntitybuilder) { this.requestCreator = new IndividualRequestCreator( - new Instrumentation(statsDReporter, IndividualRequestCreator.class), uriBuilder, headerBuilder, method, body); + new FirehoseInstrumentation(statsDReporter, IndividualRequestCreator.class), uriBuilder, headerBuilder, method, body); this.requestEntityBuilder = requestEntitybuilder; return this; } diff --git a/src/main/java/io/odpf/firehose/sink/http/request/types/ParameterizedHeaderRequest.java b/src/main/java/io/odpf/firehose/sink/http/request/types/ParameterizedHeaderRequest.java index 69f7b8cc5..5c1beff8d 100644 --- a/src/main/java/io/odpf/firehose/sink/http/request/types/ParameterizedHeaderRequest.java +++ b/src/main/java/io/odpf/firehose/sink/http/request/types/ParameterizedHeaderRequest.java @@ -1,12 +1,12 @@ package io.odpf.firehose.sink.http.request.types; +import io.odpf.depot.metrics.StatsDReporter; import io.odpf.firehose.config.HttpSinkConfig; import io.odpf.firehose.config.enums.HttpSinkRequestMethodType; import io.odpf.firehose.config.enums.HttpSinkParameterSourceType; import io.odpf.firehose.message.Message; import io.odpf.firehose.exception.DeserializerException; -import io.odpf.firehose.metrics.Instrumentation; -import io.odpf.firehose.metrics.StatsDReporter; +import io.odpf.firehose.metrics.FirehoseInstrumentation; import io.odpf.firehose.proto.ProtoToFieldMapper; import io.odpf.firehose.sink.http.request.body.JsonBody; import io.odpf.firehose.sink.http.request.create.IndividualRequestCreator; @@ -72,7 +72,7 @@ public List build(List messages) throws @Override public Request setRequestStrategy(HeaderBuilder headerBuilder, UriBuilder uriBuilder, RequestEntityBuilder requestEntitybuilder) { this.requestCreator = new IndividualRequestCreator( - new Instrumentation(statsDReporter, IndividualRequestCreator.class), uriBuilder, + new FirehoseInstrumentation(statsDReporter, IndividualRequestCreator.class), uriBuilder, headerBuilder.withParameterizedHeader(protoToFieldMapper, httpSinkConfig.getSinkHttpParameterSource()), method, body); this.requestEntityBuilder = requestEntitybuilder; diff --git a/src/main/java/io/odpf/firehose/sink/http/request/types/ParameterizedUriRequest.java b/src/main/java/io/odpf/firehose/sink/http/request/types/ParameterizedUriRequest.java index e566c63ba..9b8d2ad49 100644 --- a/src/main/java/io/odpf/firehose/sink/http/request/types/ParameterizedUriRequest.java +++ b/src/main/java/io/odpf/firehose/sink/http/request/types/ParameterizedUriRequest.java @@ -1,12 +1,12 @@ package io.odpf.firehose.sink.http.request.types; +import io.odpf.depot.metrics.StatsDReporter; import 
io.odpf.firehose.config.HttpSinkConfig; import io.odpf.firehose.config.enums.HttpSinkRequestMethodType; import io.odpf.firehose.config.enums.HttpSinkParameterSourceType; import io.odpf.firehose.message.Message; import io.odpf.firehose.exception.DeserializerException; -import io.odpf.firehose.metrics.Instrumentation; -import io.odpf.firehose.metrics.StatsDReporter; +import io.odpf.firehose.metrics.FirehoseInstrumentation; import io.odpf.firehose.proto.ProtoToFieldMapper; import io.odpf.firehose.sink.http.request.body.JsonBody; import io.odpf.firehose.sink.http.request.create.IndividualRequestCreator; @@ -71,7 +71,7 @@ public List build(List messages) throws @Override public Request setRequestStrategy(HeaderBuilder headerBuilder, UriBuilder uriBuilder, RequestEntityBuilder requestEntitybuilder) { this.requestCreator = new IndividualRequestCreator( - new Instrumentation(statsDReporter, IndividualRequestCreator.class), + new FirehoseInstrumentation(statsDReporter, IndividualRequestCreator.class), uriBuilder.withParameterizedURI(protoToFieldMapper, httpSinkConfig.getSinkHttpParameterSource()), headerBuilder, method, body); this.requestEntityBuilder = requestEntitybuilder; diff --git a/src/main/java/io/odpf/firehose/sink/http/request/types/SimpleRequest.java b/src/main/java/io/odpf/firehose/sink/http/request/types/SimpleRequest.java index d538ef0b1..eebcf3989 100644 --- a/src/main/java/io/odpf/firehose/sink/http/request/types/SimpleRequest.java +++ b/src/main/java/io/odpf/firehose/sink/http/request/types/SimpleRequest.java @@ -1,12 +1,12 @@ package io.odpf.firehose.sink.http.request.types; +import io.odpf.depot.metrics.StatsDReporter; import io.odpf.firehose.config.HttpSinkConfig; import io.odpf.firehose.config.enums.HttpSinkRequestMethodType; import io.odpf.firehose.config.enums.HttpSinkParameterSourceType; import io.odpf.firehose.message.Message; import io.odpf.firehose.exception.DeserializerException; -import io.odpf.firehose.metrics.Instrumentation; -import io.odpf.firehose.metrics.StatsDReporter; +import io.odpf.firehose.metrics.FirehoseInstrumentation; import io.odpf.firehose.sink.http.request.body.JsonBody; import io.odpf.firehose.sink.http.request.create.BatchRequestCreator; import io.odpf.firehose.sink.http.request.create.IndividualRequestCreator; @@ -61,10 +61,10 @@ public List build(List messages) throws @Override public Request setRequestStrategy(HeaderBuilder headerBuilder, UriBuilder uriBuilder, RequestEntityBuilder requestEntitybuilder) { if (isTemplateBody(httpSinkConfig)) { - this.requestCreator = new IndividualRequestCreator(new Instrumentation( + this.requestCreator = new IndividualRequestCreator(new FirehoseInstrumentation( statsDReporter, IndividualRequestCreator.class), uriBuilder, headerBuilder, method, body); } else { - this.requestCreator = new BatchRequestCreator(new Instrumentation( + this.requestCreator = new BatchRequestCreator(new FirehoseInstrumentation( statsDReporter, BatchRequestCreator.class), uriBuilder, headerBuilder, method, body); } this.requestEntityBuilder = requestEntitybuilder; diff --git a/src/main/java/io/odpf/firehose/sink/influxdb/InfluxSink.java b/src/main/java/io/odpf/firehose/sink/influxdb/InfluxSink.java index 632860f80..7e5cf3cd8 100644 --- a/src/main/java/io/odpf/firehose/sink/influxdb/InfluxSink.java +++ b/src/main/java/io/odpf/firehose/sink/influxdb/InfluxSink.java @@ -2,11 +2,11 @@ +import io.odpf.firehose.metrics.FirehoseInstrumentation; import io.odpf.firehose.sink.influxdb.builder.PointBuilder; import 
io.odpf.firehose.config.InfluxSinkConfig; import io.odpf.firehose.message.Message; import io.odpf.firehose.sink.AbstractSink; -import io.odpf.firehose.metrics.Instrumentation; import com.google.protobuf.DynamicMessage; import io.odpf.stencil.client.StencilClient; import io.odpf.stencil.Parser; @@ -34,15 +34,15 @@ public class InfluxSink extends AbstractSink { /** * Instantiates a new Influx sink. * - * @param instrumentation the instrumentation + * @param firehoseInstrumentation the instrumentation * @param sinkType the sink type * @param config the config * @param protoParser the proto parser * @param client the client * @param stencilClient the stencil client */ - public InfluxSink(Instrumentation instrumentation, String sinkType, InfluxSinkConfig config, Parser protoParser, InfluxDB client, StencilClient stencilClient) { - super(instrumentation, sinkType); + public InfluxSink(FirehoseInstrumentation firehoseInstrumentation, String sinkType, InfluxSinkConfig config, Parser protoParser, InfluxDB client, StencilClient stencilClient) { + super(firehoseInstrumentation, sinkType); this.config = config; this.protoParser = protoParser; this.pointBuilder = new PointBuilder(config); @@ -56,21 +56,21 @@ protected void prepare(List messages) throws IOException { for (Message message : messages) { DynamicMessage dynamicMessage = protoParser.parse(message.getLogMessage()); Point point = pointBuilder.buildPoint(dynamicMessage); - getInstrumentation().logDebug("Data point: {}", point.toString()); + getFirehoseInstrumentation().logDebug("Data point: {}", point.toString()); batchPoints.point(point); } } @Override protected List execute() { - getInstrumentation().logDebug("Batch points: {}", batchPoints.toString()); + getFirehoseInstrumentation().logDebug("Batch points: {}", batchPoints.toString()); client.write(batchPoints); return new ArrayList<>(); } @Override public void close() throws IOException { - getInstrumentation().logInfo("InfluxDB connection closing"); + getFirehoseInstrumentation().logInfo("InfluxDB connection closing"); stencilClient.close(); } } diff --git a/src/main/java/io/odpf/firehose/sink/influxdb/InfluxSinkFactory.java b/src/main/java/io/odpf/firehose/sink/influxdb/InfluxSinkFactory.java index ce330c2de..67766c235 100644 --- a/src/main/java/io/odpf/firehose/sink/influxdb/InfluxSinkFactory.java +++ b/src/main/java/io/odpf/firehose/sink/influxdb/InfluxSinkFactory.java @@ -1,10 +1,10 @@ package io.odpf.firehose.sink.influxdb; +import io.odpf.depot.metrics.StatsDReporter; import io.odpf.firehose.config.InfluxSinkConfig; +import io.odpf.firehose.metrics.FirehoseInstrumentation; import io.odpf.firehose.sink.AbstractSink; -import io.odpf.firehose.metrics.Instrumentation; -import io.odpf.firehose.metrics.StatsDReporter; import io.odpf.stencil.client.StencilClient; import org.aeonbits.owner.ConfigFactory; import org.influxdb.InfluxDB; @@ -27,12 +27,12 @@ public class InfluxSinkFactory { public static AbstractSink create(Map configProperties, StatsDReporter statsDReporter, StencilClient stencilClient) { InfluxSinkConfig config = ConfigFactory.create(InfluxSinkConfig.class, configProperties); - Instrumentation instrumentation = new Instrumentation(statsDReporter, InfluxSinkFactory.class); - instrumentation.logDebug("\nInflux Url: {}\nInflux Username: {}", config.getSinkInfluxUrl(), config.getSinkInfluxUsername()); + FirehoseInstrumentation firehoseInstrumentation = new FirehoseInstrumentation(statsDReporter, InfluxSinkFactory.class); + firehoseInstrumentation.logDebug("\nInflux Url: {}\nInflux 
Username: {}", config.getSinkInfluxUrl(), config.getSinkInfluxUsername()); InfluxDB client = InfluxDBFactory.connect(config.getSinkInfluxUrl(), config.getSinkInfluxUsername(), config.getSinkInfluxPassword()); - instrumentation.logInfo("InfluxDB connection established"); + firehoseInstrumentation.logInfo("InfluxDB connection established"); - return new InfluxSink(new Instrumentation(statsDReporter, InfluxSink.class), "influx.db", config, stencilClient.getParser(config.getInputSchemaProtoClass()), client, stencilClient); + return new InfluxSink(new FirehoseInstrumentation(statsDReporter, InfluxSink.class), "influx.db", config, stencilClient.getParser(config.getInputSchemaProtoClass()), client, stencilClient); } } diff --git a/src/main/java/io/odpf/firehose/sink/jdbc/JdbcSink.java b/src/main/java/io/odpf/firehose/sink/jdbc/JdbcSink.java index 57017732c..8f643c37d 100644 --- a/src/main/java/io/odpf/firehose/sink/jdbc/JdbcSink.java +++ b/src/main/java/io/odpf/firehose/sink/jdbc/JdbcSink.java @@ -2,7 +2,7 @@ import io.odpf.firehose.message.Message; -import io.odpf.firehose.metrics.Instrumentation; +import io.odpf.firehose.metrics.FirehoseInstrumentation; import io.odpf.firehose.sink.AbstractSink; import io.odpf.stencil.client.StencilClient; @@ -29,21 +29,21 @@ public class JdbcSink extends AbstractSink { /** * Instantiates a new Jdbc sink. * - * @param instrumentation the instrumentation + * @param firehoseInstrumentation the instrumentation * @param sinkType the sink type * @param pool the pool * @param queryTemplate the query template * @param stencilClient the stencil client */ - public JdbcSink(Instrumentation instrumentation, String sinkType, JdbcConnectionPool pool, QueryTemplate queryTemplate, StencilClient stencilClient) { - super(instrumentation, sinkType); + public JdbcSink(FirehoseInstrumentation firehoseInstrumentation, String sinkType, JdbcConnectionPool pool, QueryTemplate queryTemplate, StencilClient stencilClient) { + super(firehoseInstrumentation, sinkType); this.pool = pool; this.queryTemplate = queryTemplate; this.stencilClient = stencilClient; } - JdbcSink(Instrumentation instrumentation, String sinkType, JdbcConnectionPool pool, QueryTemplate queryTemplate, StencilClient stencilClient, Statement statement, Connection connection) { - this(instrumentation, sinkType, pool, queryTemplate, stencilClient); + JdbcSink(FirehoseInstrumentation firehoseInstrumentation, String sinkType, JdbcConnectionPool pool, QueryTemplate queryTemplate, StencilClient stencilClient, Statement statement, Connection connection) { + this(firehoseInstrumentation, sinkType, pool, queryTemplate, stencilClient); this.statement = statement; this.connection = connection; } @@ -63,7 +63,7 @@ protected List createQueries(List messages) { List queries = new ArrayList<>(); for (Message message : messages) { String queryString = queryTemplate.toQueryString(message); - getInstrumentation().logDebug(queryString); + getFirehoseInstrumentation().logDebug(queryString); queries.add(queryString); } return queries; @@ -73,7 +73,7 @@ protected List createQueries(List messages) { protected List execute() throws Exception { try { int[] updateCounts = statement.executeBatch(); - getInstrumentation().logDebug("DB response: {}", Arrays.toString(updateCounts)); + getFirehoseInstrumentation().logDebug("DB response: {}", Arrays.toString(updateCounts)); } finally { if (connection != null) { pool.release(connection); @@ -85,7 +85,7 @@ protected List execute() throws Exception { @Override public void close() throws IOException { try 
{ - getInstrumentation().logInfo("Database connection closing"); + getFirehoseInstrumentation().logInfo("Database connection closing"); pool.shutdown(); stencilClient.close(); } catch (InterruptedException e) { diff --git a/src/main/java/io/odpf/firehose/sink/jdbc/JdbcSinkFactory.java b/src/main/java/io/odpf/firehose/sink/jdbc/JdbcSinkFactory.java index c11cc7c6e..a40ebfd62 100644 --- a/src/main/java/io/odpf/firehose/sink/jdbc/JdbcSinkFactory.java +++ b/src/main/java/io/odpf/firehose/sink/jdbc/JdbcSinkFactory.java @@ -1,10 +1,10 @@ package io.odpf.firehose.sink.jdbc; +import io.odpf.depot.metrics.StatsDReporter; import io.odpf.firehose.config.JdbcSinkConfig; +import io.odpf.firehose.metrics.FirehoseInstrumentation; import io.odpf.firehose.sink.AbstractSink; -import io.odpf.firehose.metrics.Instrumentation; -import io.odpf.firehose.metrics.StatsDReporter; import io.odpf.firehose.proto.ProtoToFieldMapper; import io.odpf.stencil.client.StencilClient; import io.odpf.stencil.Parser; @@ -32,18 +32,18 @@ public class JdbcSinkFactory { public static AbstractSink create(Map configuration, StatsDReporter statsDReporter, StencilClient client) { JdbcSinkConfig jdbcSinkConfig = ConfigFactory.create(JdbcSinkConfig.class, configuration); - Instrumentation instrumentation = new Instrumentation(statsDReporter, JdbcSinkFactory.class); + FirehoseInstrumentation firehoseInstrumentation = new FirehoseInstrumentation(statsDReporter, JdbcSinkFactory.class); String dbConfig = String.format("" + "\n\tJDBC URL: %s\n\tJDBC Username: %s\n\tJDBC Tablename: %s\n\tUnique keys: %s", jdbcSinkConfig.getSinkJdbcUrl(), jdbcSinkConfig.getSinkJdbcUsername(), jdbcSinkConfig.getSinkJdbcTableName(), jdbcSinkConfig.getSinkJdbcUniqueKeys()); - instrumentation.logDebug(dbConfig); + firehoseInstrumentation.logDebug(dbConfig); JdbcConnectionPool connectionPool = new HikariJdbcConnectionPool(jdbcSinkConfig.getSinkJdbcUrl(), jdbcSinkConfig.getSinkJdbcUsername(), jdbcSinkConfig.getSinkJdbcPassword(), jdbcSinkConfig.getSinkJdbcConnectionPoolMaxSize(), jdbcSinkConfig.getSinkJdbcConnectionPoolTimeoutMs(), jdbcSinkConfig.getSinkJdbcConnectionPoolIdleTimeoutMs(), jdbcSinkConfig.getSinkJdbcConnectionPoolMinIdle()); - instrumentation.logInfo("JDBC Connection established"); + firehoseInstrumentation.logInfo("JDBC Connection established"); QueryTemplate queryTemplate = createQueryTemplate(jdbcSinkConfig, client); - return new JdbcSink(new Instrumentation(statsDReporter, JdbcSink.class), "db", connectionPool, queryTemplate, client); + return new JdbcSink(new FirehoseInstrumentation(statsDReporter, JdbcSink.class), "db", connectionPool, queryTemplate, client); } private static QueryTemplate createQueryTemplate(JdbcSinkConfig jdbcSinkConfig, StencilClient stencilClient) { diff --git a/src/main/java/io/odpf/firehose/sink/log/LogSink.java b/src/main/java/io/odpf/firehose/sink/log/LogSink.java deleted file mode 100644 index 78f277d99..000000000 --- a/src/main/java/io/odpf/firehose/sink/log/LogSink.java +++ /dev/null @@ -1,47 +0,0 @@ -package io.odpf.firehose.sink.log; - -import io.odpf.firehose.exception.DeserializerException; -import io.odpf.firehose.message.Message; -import io.odpf.firehose.metrics.Instrumentation; -import io.odpf.firehose.sink.AbstractSink; - -import java.io.IOException; -import java.sql.SQLException; -import java.util.ArrayList; -import java.util.Collections; -import java.util.List; - -/** - * Sink implementation to write the messages read from kafka to the console. 
- * The related configurations for LogSink can be found here: {@see io.odpf.firehose.config.LogConfig} - */ -public class LogSink extends AbstractSink { - - private final KeyOrMessageParser parser; - private final Instrumentation instrumentation; - private final List messageList = new ArrayList<>(); - - public LogSink(KeyOrMessageParser parser, Instrumentation instrumentation) { - super(instrumentation, "LOG"); - this.parser = parser; - this.instrumentation = instrumentation; - } - - @Override - protected List execute() throws Exception { - for (Message message : messageList) { - instrumentation.logInfo("\n================= DATA =======================\n{}", parser.parse(message)); - } - return Collections.emptyList(); - } - - @Override - protected void prepare(List messages) throws DeserializerException, IOException, SQLException { - messageList.clear(); - messageList.addAll(messages); - } - - @Override - public void close() throws IOException { - } -} diff --git a/src/main/java/io/odpf/firehose/sink/log/LogSinkFactory.java b/src/main/java/io/odpf/firehose/sink/log/LogSinkFactory.java deleted file mode 100644 index 237ca5f99..000000000 --- a/src/main/java/io/odpf/firehose/sink/log/LogSinkFactory.java +++ /dev/null @@ -1,35 +0,0 @@ -package io.odpf.firehose.sink.log; - - -import io.odpf.firehose.config.AppConfig; -import io.odpf.firehose.metrics.Instrumentation; -import io.odpf.firehose.metrics.StatsDReporter; -import io.odpf.firehose.sink.Sink; -import io.odpf.stencil.client.StencilClient; -import org.aeonbits.owner.ConfigFactory; - -import java.util.Map; - -/** - * Factory class to create the LogSink. - *
- * The consumer framework would reflectively instantiate this factory - * using the configurations supplied and invoke {@see #create(Map < String, String > configuration, StatsDClient client)} - * to obtain the LogSink sink implementation. - */ -public class LogSinkFactory { - - /** - * Creates the LogSink. - * - * @param configuration the configuration - * @param statsDReporter the stats d reporter - * @param stencilClient the stencil client - * @return the sink - */ - public static Sink create(Map configuration, StatsDReporter statsDReporter, StencilClient stencilClient) { - AppConfig appConfig = ConfigFactory.create(AppConfig.class, configuration); - KeyOrMessageParser parser = new KeyOrMessageParser(stencilClient.getParser(appConfig.getInputSchemaProtoClass()), appConfig); - return new LogSink(parser, new Instrumentation(statsDReporter, LogSink.class)); - } -} diff --git a/src/main/java/io/odpf/firehose/sink/mongodb/MongoSink.java b/src/main/java/io/odpf/firehose/sink/mongodb/MongoSink.java index b44e27f86..237eadff6 100644 --- a/src/main/java/io/odpf/firehose/sink/mongodb/MongoSink.java +++ b/src/main/java/io/odpf/firehose/sink/mongodb/MongoSink.java @@ -3,7 +3,7 @@ import com.mongodb.bulk.BulkWriteError; import com.mongodb.client.model.WriteModel; import io.odpf.firehose.message.Message; -import io.odpf.firehose.metrics.Instrumentation; +import io.odpf.firehose.metrics.FirehoseInstrumentation; import io.odpf.firehose.sink.AbstractSink; import io.odpf.firehose.sink.mongodb.client.MongoSinkClient; import io.odpf.firehose.sink.mongodb.request.MongoRequestHandler; @@ -31,14 +31,14 @@ public class MongoSink extends AbstractSink { /** * Instantiates a new Mongo sink. * - * @param instrumentation the instrumentation + * @param firehoseInstrumentation the instrumentation * @param sinkType the sink type * @param mongoRequestHandler the mongo request handler * @since 0.1 */ - public MongoSink(Instrumentation instrumentation, String sinkType, MongoRequestHandler mongoRequestHandler, + public MongoSink(FirehoseInstrumentation firehoseInstrumentation, String sinkType, MongoRequestHandler mongoRequestHandler, MongoSinkClient mongoSinkClient) { - super(instrumentation, sinkType); + super(firehoseInstrumentation, sinkType); this.mongoRequestHandler = mongoRequestHandler; this.mongoSinkClient = mongoSinkClient; } @@ -88,7 +88,7 @@ protected List execute() { */ @Override public void close() throws IOException { - getInstrumentation().logInfo("MongoDB connection closing"); + getFirehoseInstrumentation().logInfo("MongoDB connection closing"); this.mongoSinkClient.close(); } } diff --git a/src/main/java/io/odpf/firehose/sink/mongodb/MongoSinkFactory.java b/src/main/java/io/odpf/firehose/sink/mongodb/MongoSinkFactory.java index 60641a92d..ce62fadac 100644 --- a/src/main/java/io/odpf/firehose/sink/mongodb/MongoSinkFactory.java +++ b/src/main/java/io/odpf/firehose/sink/mongodb/MongoSinkFactory.java @@ -1,5 +1,7 @@ package io.odpf.firehose.sink.mongodb; +import io.odpf.depot.metrics.StatsDReporter; +import io.odpf.firehose.metrics.FirehoseInstrumentation; import io.odpf.stencil.client.StencilClient; import com.mongodb.MongoClient; import com.mongodb.MongoClientOptions; @@ -7,8 +9,6 @@ import com.mongodb.ServerAddress; import io.odpf.firehose.config.MongoSinkConfig; import io.odpf.firehose.config.enums.SinkType; -import io.odpf.firehose.metrics.Instrumentation; -import io.odpf.firehose.metrics.StatsDReporter; import io.odpf.firehose.serializer.MessageToJson; import io.odpf.firehose.sink.Sink; import 
io.odpf.firehose.sink.mongodb.client.MongoSinkClient; @@ -39,20 +39,20 @@ public class MongoSinkFactory { */ public static Sink create(Map configuration, StatsDReporter statsDReporter, StencilClient stencilClient) { MongoSinkConfig mongoSinkConfig = ConfigFactory.create(MongoSinkConfig.class, configuration); - Instrumentation instrumentation = new Instrumentation(statsDReporter, MongoSinkFactory.class); + FirehoseInstrumentation firehoseInstrumentation = new FirehoseInstrumentation(statsDReporter, MongoSinkFactory.class); - logMongoConfig(mongoSinkConfig, instrumentation); - MongoRequestHandler mongoRequestHandler = new MongoRequestHandlerFactory(mongoSinkConfig, new Instrumentation(statsDReporter, MongoRequestHandlerFactory.class), + logMongoConfig(mongoSinkConfig, firehoseInstrumentation); + MongoRequestHandler mongoRequestHandler = new MongoRequestHandlerFactory(mongoSinkConfig, new FirehoseInstrumentation(statsDReporter, MongoRequestHandlerFactory.class), mongoSinkConfig.getSinkMongoPrimaryKey(), mongoSinkConfig.getSinkMongoInputMessageType(), new MessageToJson(stencilClient.getParser(mongoSinkConfig.getInputSchemaProtoClass()), mongoSinkConfig.isSinkMongoPreserveProtoFieldNamesEnable(), false) ).getRequestHandler(); - MongoClient mongoClient = buildMongoClient(mongoSinkConfig, instrumentation); - MongoSinkClient mongoSinkClient = new MongoSinkClient(mongoSinkConfig, new Instrumentation(statsDReporter, MongoSinkClient.class), mongoClient); + MongoClient mongoClient = buildMongoClient(mongoSinkConfig, firehoseInstrumentation); + MongoSinkClient mongoSinkClient = new MongoSinkClient(mongoSinkConfig, new FirehoseInstrumentation(statsDReporter, MongoSinkClient.class), mongoClient); mongoSinkClient.prepare(); - instrumentation.logInfo("MONGO connection established"); + firehoseInstrumentation.logInfo("MONGO connection established"); - return new MongoSink(new Instrumentation(statsDReporter, MongoSink.class), SinkType.MONGODB.name().toLowerCase(), mongoRequestHandler, + return new MongoSink(new FirehoseInstrumentation(statsDReporter, MongoSink.class), SinkType.MONGODB.name().toLowerCase(), mongoRequestHandler, mongoSinkClient); } @@ -71,8 +71,8 @@ public static Sink create(Map configuration, StatsDReporter stat * @return the mongo client * @since 0.1 */ - private static MongoClient buildMongoClient(MongoSinkConfig mongoSinkConfig, Instrumentation instrumentation) { - List serverAddresses = MongoSinkFactoryUtil.getServerAddresses(mongoSinkConfig.getSinkMongoConnectionUrls(), instrumentation); + private static MongoClient buildMongoClient(MongoSinkConfig mongoSinkConfig, FirehoseInstrumentation firehoseInstrumentation) { + List serverAddresses = MongoSinkFactoryUtil.getServerAddresses(mongoSinkConfig.getSinkMongoConnectionUrls(), firehoseInstrumentation); MongoClientOptions options = MongoClientOptions.builder() .connectTimeout(mongoSinkConfig.getSinkMongoConnectTimeoutMs()) .serverSelectionTimeout(mongoSinkConfig.getSinkMongoServerSelectTimeoutMs()) @@ -105,7 +105,7 @@ private static MongoClient buildMongoClient(MongoSinkConfig mongoSinkConfig, Ins * * @since 0.1 */ - private static void logMongoConfig(MongoSinkConfig mongoSinkConfig, Instrumentation instrumentation) { + private static void logMongoConfig(MongoSinkConfig mongoSinkConfig, FirehoseInstrumentation firehoseInstrumentation) { String mongoConfig = String.format("\n\tMONGO connection urls: %s" + "\n\tMONGO Database name: %s" + "\n\tMONGO Primary Key: %s" @@ -130,6 +130,6 @@ private static void logMongoConfig(MongoSinkConfig 
mongoSinkConfig, Instrumentat mongoSinkConfig.getSinkMongoAuthUsername(), mongoSinkConfig.getSinkMongoAuthDB()); - instrumentation.logDebug(mongoConfig); + firehoseInstrumentation.logDebug(mongoConfig); } } diff --git a/src/main/java/io/odpf/firehose/sink/mongodb/client/MongoSinkClient.java b/src/main/java/io/odpf/firehose/sink/mongodb/client/MongoSinkClient.java index af0a6b383..4c71750ed 100644 --- a/src/main/java/io/odpf/firehose/sink/mongodb/client/MongoSinkClient.java +++ b/src/main/java/io/odpf/firehose/sink/mongodb/client/MongoSinkClient.java @@ -9,7 +9,7 @@ import com.mongodb.client.MongoDatabase; import com.mongodb.client.model.WriteModel; import io.odpf.firehose.config.MongoSinkConfig; -import io.odpf.firehose.metrics.Instrumentation; +import io.odpf.firehose.metrics.FirehoseInstrumentation; import lombok.AllArgsConstructor; import org.bson.Document; @@ -34,7 +34,7 @@ public class MongoSinkClient implements Closeable { private MongoCollection mongoCollection; - private final Instrumentation instrumentation; + private final FirehoseInstrumentation firehoseInstrumentation; private final List mongoRetryStatusCodeBlacklist; private final MongoClient mongoClient; private final MongoSinkConfig mongoSinkConfig; @@ -43,12 +43,12 @@ public class MongoSinkClient implements Closeable { * Instantiates a new Mongo sink client. * * @param mongoSinkConfig the mongo sink config - * @param instrumentation the instrumentation + * @param firehoseInstrumentation the instrumentation * @since 0.1 */ - public MongoSinkClient(MongoSinkConfig mongoSinkConfig, Instrumentation instrumentation, MongoClient mongoClient) { + public MongoSinkClient(MongoSinkConfig mongoSinkConfig, FirehoseInstrumentation firehoseInstrumentation, MongoClient mongoClient) { this.mongoSinkConfig = mongoSinkConfig; - this.instrumentation = instrumentation; + this.firehoseInstrumentation = firehoseInstrumentation; this.mongoClient = mongoClient; mongoRetryStatusCodeBlacklist = MongoSinkClientUtil.getStatusCodesAsList(mongoSinkConfig.getSinkMongoRetryStatusCodeBlacklist()); @@ -62,27 +62,27 @@ public void prepare() { String databaseName = mongoSinkConfig.getSinkMongoDBName(); String collectionName = mongoSinkConfig.getSinkMongoCollectionName(); - boolean doesDBExist = MongoSinkClientUtil.checkDatabaseExists(databaseName, mongoClient, instrumentation); + boolean doesDBExist = MongoSinkClientUtil.checkDatabaseExists(databaseName, mongoClient, firehoseInstrumentation); MongoDatabase database = mongoClient.getDatabase(databaseName); - boolean doesCollectionExist = MongoSinkClientUtil.checkCollectionExists(collectionName, database, instrumentation); + boolean doesCollectionExist = MongoSinkClientUtil.checkCollectionExists(collectionName, database, firehoseInstrumentation); if (!doesCollectionExist) { try { database.createCollection(collectionName); } catch (MongoCommandException e) { if (!doesDBExist) { - instrumentation.logError("Failed to create database"); + firehoseInstrumentation.logError("Failed to create database"); } - instrumentation.logError("Failed to create collection. Cause: " + e.getErrorMessage()); + firehoseInstrumentation.logError("Failed to create collection. 
Cause: " + e.getErrorMessage()); throw e; } if (!doesDBExist) { - instrumentation.logInfo("Database: " + databaseName + " was successfully created"); + firehoseInstrumentation.logInfo("Database: " + databaseName + " was successfully created"); } - instrumentation.logInfo("Collection: " + collectionName + " was successfully created"); + firehoseInstrumentation.logInfo("Collection: " + collectionName + " was successfully created"); } mongoCollection = database.getCollection(collectionName); - instrumentation.logInfo("Successfully connected to Mongo namespace : " + mongoCollection.getNamespace().getFullName()); + firehoseInstrumentation.logInfo("Successfully connected to Mongo namespace : " + mongoCollection.getNamespace().getFullName()); } /** @@ -103,7 +103,7 @@ public List processRequest(List> request) { logResults(mongoCollection.bulkWrite(request), request.size()); return Collections.emptyList(); } catch (MongoBulkWriteException writeException) { - instrumentation.logWarn("Bulk request failed"); + firehoseInstrumentation.logWarn("Bulk request failed"); List writeErrors = writeException.getWriteErrors(); logErrors(writeErrors); @@ -120,35 +120,35 @@ private void logResults(BulkWriteResult writeResult, int messageCount) { int totalInsertedCount = writeResult.getInsertedCount() + writeResult.getUpserts().size(); if (totalWriteCount == 0) { - instrumentation.logWarn("Bulk request failed"); + firehoseInstrumentation.logWarn("Bulk request failed"); } else if (totalWriteCount == messageCount) { - instrumentation.logInfo("Bulk request succeeded"); + firehoseInstrumentation.logInfo("Bulk request succeeded"); } else { - instrumentation.logWarn("Bulk request partially succeeded"); + firehoseInstrumentation.logWarn("Bulk request partially succeeded"); } if (totalWriteCount != messageCount) { - instrumentation.logWarn("Bulk request failures count: {}", failureCount); + firehoseInstrumentation.logWarn("Bulk request failures count: {}", failureCount); if (mongoSinkConfig.isSinkMongoModeUpdateOnlyEnable()) { for (int i = 0; i < failureCount; i++) { - instrumentation.incrementCounter(SINK_MESSAGES_DROP_TOTAL, "cause=Primary Key value not found"); + firehoseInstrumentation.incrementCounter(SINK_MESSAGES_DROP_TOTAL, "cause=Primary Key value not found"); } - instrumentation.logWarn("Some Messages were dropped because their Primary Key values had no matches"); + firehoseInstrumentation.logWarn("Some Messages were dropped because their Primary Key values had no matches"); } else { for (int i = 0; i < failureCount; i++) { - instrumentation.incrementCounter(SINK_MESSAGES_DROP_TOTAL); + firehoseInstrumentation.incrementCounter(SINK_MESSAGES_DROP_TOTAL); } } } if (writeResult.wasAcknowledged()) { - instrumentation.logInfo("Bulk Write operation was successfully acknowledged"); + firehoseInstrumentation.logInfo("Bulk Write operation was successfully acknowledged"); } else { - instrumentation.logWarn("Bulk Write operation was not acknowledged"); + firehoseInstrumentation.logWarn("Bulk Write operation was not acknowledged"); } - instrumentation.logInfo( + firehoseInstrumentation.logInfo( "Inserted Count = {}. Matched Count = {}. Deleted Count = {}. Updated Count = {}. 
Total Modified Count = {}", totalInsertedCount, writeResult.getMatchedCount(), @@ -157,13 +157,13 @@ private void logResults(BulkWriteResult writeResult, int messageCount) { totalWriteCount); for (int i = 0; i < totalInsertedCount; i++) { - instrumentation.incrementCounter(SINK_MONGO_INSERTED_TOTAL); + firehoseInstrumentation.incrementCounter(SINK_MONGO_INSERTED_TOTAL); } for (int i = 0; i < writeResult.getModifiedCount(); i++) { - instrumentation.incrementCounter(SINK_MONGO_UPDATED_TOTAL); + firehoseInstrumentation.incrementCounter(SINK_MONGO_UPDATED_TOTAL); } for (int i = 0; i < totalWriteCount; i++) { - instrumentation.incrementCounter(SINK_MONGO_MODIFIED_TOTAL); + firehoseInstrumentation.incrementCounter(SINK_MONGO_MODIFIED_TOTAL); } } @@ -182,12 +182,12 @@ private void logErrors(List writeErrors) { writeErrors.stream() .filter(writeError -> mongoRetryStatusCodeBlacklist.contains(writeError.getCode())) .forEach(writeError -> { - instrumentation.logWarn("Non-retriable error due to response status: {} is under blacklisted status code", writeError.getCode()); - instrumentation.incrementCounter(SINK_MESSAGES_DROP_TOTAL, "cause=" + writeError.getMessage()); - instrumentation.logInfo("Message dropped because of status code: " + writeError.getCode()); + firehoseInstrumentation.logWarn("Non-retriable error due to response status: {} is under blacklisted status code", writeError.getCode()); + firehoseInstrumentation.incrementCounter(SINK_MESSAGES_DROP_TOTAL, "cause=" + writeError.getMessage()); + firehoseInstrumentation.logInfo("Message dropped because of status code: " + writeError.getCode()); }); - instrumentation.logWarn("Bulk request failed count: {}", writeErrors.size()); + firehoseInstrumentation.logWarn("Bulk request failed count: {}", writeErrors.size()); } @Override diff --git a/src/main/java/io/odpf/firehose/sink/mongodb/client/MongoSinkClientUtil.java b/src/main/java/io/odpf/firehose/sink/mongodb/client/MongoSinkClientUtil.java index b24f34e2d..74e1326af 100644 --- a/src/main/java/io/odpf/firehose/sink/mongodb/client/MongoSinkClientUtil.java +++ b/src/main/java/io/odpf/firehose/sink/mongodb/client/MongoSinkClientUtil.java @@ -3,7 +3,7 @@ import com.mongodb.MongoClient; import com.mongodb.MongoNamespace; import com.mongodb.client.MongoDatabase; -import io.odpf.firehose.metrics.Instrumentation; +import io.odpf.firehose.metrics.FirehoseInstrumentation; import lombok.experimental.UtilityClass; import java.util.ArrayList; @@ -39,16 +39,16 @@ static List getStatusCodesAsList(String mongoRetryStatusCodeBlacklist) * * @param databaseName the database name * @param mongoClient the mongo client - * @param instrumentation the instrumentation + * @param firehoseInstrumentation the instrumentation * @return true if database already exists, otherwise false */ - static boolean checkDatabaseExists(String databaseName, MongoClient mongoClient, Instrumentation instrumentation) { + static boolean checkDatabaseExists(String databaseName, MongoClient mongoClient, FirehoseInstrumentation firehoseInstrumentation) { MongoNamespace.checkDatabaseNameValidity(databaseName); boolean doesDBExist = true; if (!mongoClient.listDatabaseNames() .into(new ArrayList<>()) .contains(databaseName)) { - instrumentation.logInfo("Database: " + databaseName + " does not exist. Attempting to create database"); + firehoseInstrumentation.logInfo("Database: " + databaseName + " does not exist. 
Attempting to create database"); doesDBExist = false; } @@ -60,10 +60,10 @@ static boolean checkDatabaseExists(String databaseName, MongoClient mongoClient, * * @param collectionName the collection name * @param database the database - * @param instrumentation the instrumentation + * @param firehoseInstrumentation the instrumentation * @return true if collection already exists, otherwise false */ - static boolean checkCollectionExists(String collectionName, MongoDatabase database, Instrumentation instrumentation) { + static boolean checkCollectionExists(String collectionName, MongoDatabase database, FirehoseInstrumentation firehoseInstrumentation) { MongoNamespace.checkCollectionNameValidity(collectionName); boolean doesCollectionExist = true; @@ -71,7 +71,7 @@ static boolean checkCollectionExists(String collectionName, MongoDatabase databa .into(new ArrayList<>()) .contains(collectionName)) { doesCollectionExist = false; - instrumentation.logInfo("Collection: " + collectionName + " does not exist. Attempting to create collection"); + firehoseInstrumentation.logInfo("Collection: " + collectionName + " does not exist. Attempting to create collection"); } return doesCollectionExist; } diff --git a/src/main/java/io/odpf/firehose/sink/mongodb/request/MongoRequestHandlerFactory.java b/src/main/java/io/odpf/firehose/sink/mongodb/request/MongoRequestHandlerFactory.java index 41860551b..fe24c25d3 100644 --- a/src/main/java/io/odpf/firehose/sink/mongodb/request/MongoRequestHandlerFactory.java +++ b/src/main/java/io/odpf/firehose/sink/mongodb/request/MongoRequestHandlerFactory.java @@ -3,7 +3,7 @@ import io.odpf.firehose.config.MongoSinkConfig; import io.odpf.firehose.config.enums.MongoSinkMessageType; import io.odpf.firehose.config.enums.MongoSinkRequestType; -import io.odpf.firehose.metrics.Instrumentation; +import io.odpf.firehose.metrics.FirehoseInstrumentation; import io.odpf.firehose.serializer.MessageToJson; import lombok.AllArgsConstructor; @@ -19,7 +19,7 @@ public class MongoRequestHandlerFactory { private final MongoSinkConfig mongoSinkConfig; - private final Instrumentation instrumentation; + private final FirehoseInstrumentation firehoseInstrumentation; private final String mongoPrimaryKey; private final MongoSinkMessageType messageType; private final MessageToJson jsonSerializer; @@ -46,7 +46,7 @@ public MongoRequestHandler getRequestHandler() { throw new IllegalArgumentException("KAFKA_RECORD_PARSER_MODE should be key/message"); } MongoSinkRequestType mongoSinkRequestType = mongoSinkConfig.isSinkMongoModeUpdateOnlyEnable() ? 
UPDATE_ONLY : UPSERT; - instrumentation.logInfo("Mongo request mode: {}", mongoSinkRequestType); + firehoseInstrumentation.logInfo("Mongo request mode: {}", mongoSinkRequestType); if (mongoSinkRequestType == UPDATE_ONLY && mongoPrimaryKey == null) { throw new IllegalArgumentException("Primary Key cannot be null in Update-Only mode"); } diff --git a/src/main/java/io/odpf/firehose/sink/mongodb/util/MongoSinkFactoryUtil.java b/src/main/java/io/odpf/firehose/sink/mongodb/util/MongoSinkFactoryUtil.java index 302e6b2a3..c2cceb513 100644 --- a/src/main/java/io/odpf/firehose/sink/mongodb/util/MongoSinkFactoryUtil.java +++ b/src/main/java/io/odpf/firehose/sink/mongodb/util/MongoSinkFactoryUtil.java @@ -1,7 +1,7 @@ package io.odpf.firehose.sink.mongodb.util; import com.mongodb.ServerAddress; -import io.odpf.firehose.metrics.Instrumentation; +import io.odpf.firehose.metrics.FirehoseInstrumentation; import lombok.experimental.UtilityClass; import java.util.ArrayList; @@ -24,7 +24,7 @@ public class MongoSinkFactoryUtil { * of all the MongoDB servers into a list, which is returned. * * @param mongoConnectionUrls the mongo connection urls - * @param instrumentation the instrumentation + * @param firehoseInstrumentation the instrumentation * @return the list of server addresses * @throws IllegalArgumentException if the environment variable SINK_MONGO_CONNECTION_URLS * is an empty string or not assigned any value by the user. @@ -32,7 +32,7 @@ public class MongoSinkFactoryUtil { * any or both of hostname/ IP address and the port * @since 0.1 */ - public static List getServerAddresses(String mongoConnectionUrls, Instrumentation instrumentation) { + public static List getServerAddresses(String mongoConnectionUrls, FirehoseInstrumentation firehoseInstrumentation) { if (mongoConnectionUrls != null && !mongoConnectionUrls.isEmpty()) { List mongoNodes = Arrays.asList(mongoConnectionUrls.trim().split(",")); List serverAddresses = new ArrayList<>(mongoNodes.size()); @@ -47,7 +47,7 @@ public static List getServerAddresses(String mongoConnectionUrls, }); return serverAddresses; } else { - instrumentation.logError("No connection URL found"); + firehoseInstrumentation.logError("No connection URL found"); throw new IllegalArgumentException("SINK_MONGO_CONNECTION_URLS is empty or null"); } } diff --git a/src/main/java/io/odpf/firehose/sink/prometheus/PromSink.java b/src/main/java/io/odpf/firehose/sink/prometheus/PromSink.java index e9a933896..0a28ffd41 100644 --- a/src/main/java/io/odpf/firehose/sink/prometheus/PromSink.java +++ b/src/main/java/io/odpf/firehose/sink/prometheus/PromSink.java @@ -1,13 +1,13 @@ package io.odpf.firehose.sink.prometheus; +import io.odpf.firehose.metrics.FirehoseInstrumentation; import io.odpf.firehose.sink.common.AbstractHttpSink; import io.odpf.firehose.sink.prometheus.request.PromRequest; import com.google.protobuf.DynamicMessage; import cortexpb.Cortex; import io.odpf.firehose.message.Message; import io.odpf.firehose.exception.DeserializerException; -import io.odpf.firehose.metrics.Instrumentation; import io.odpf.stencil.client.StencilClient; import org.apache.commons.io.IOUtils; import org.apache.http.HttpResponse; @@ -34,15 +34,15 @@ public class PromSink extends AbstractHttpSink { /** * Instantiates a new Prometheus sink. 
* - * @param instrumentation the instrumentation + * @param firehoseInstrumentation the instrumentation * @param request the request * @param httpClient the http client * @param stencilClient the stencil client * @param retryStatusCodeRanges the retry status code ranges * @param requestLogStatusCodeRanges the request log status code ranges */ - public PromSink(Instrumentation instrumentation, PromRequest request, HttpClient httpClient, StencilClient stencilClient, Map retryStatusCodeRanges, Map requestLogStatusCodeRanges) { - super(instrumentation, "prometheus", httpClient, stencilClient, retryStatusCodeRanges, requestLogStatusCodeRanges); + public PromSink(FirehoseInstrumentation firehoseInstrumentation, PromRequest request, HttpClient httpClient, StencilClient stencilClient, Map retryStatusCodeRanges, Map requestLogStatusCodeRanges) { + super(firehoseInstrumentation, "prometheus", httpClient, stencilClient, retryStatusCodeRanges, requestLogStatusCodeRanges); this.request = request; } @@ -63,8 +63,8 @@ protected void prepare(List messages) throws DeserializerException, IOE } protected void captureMessageDropCount(HttpResponse response, List contentStringList) { - getInstrumentation().captureCount(SINK_MESSAGES_DROP_TOTAL, contentStringList.size(), "cause= " + statusCode(response)); - getInstrumentation().logInfo("Message dropped because of status code: " + statusCode(response)); + getFirehoseInstrumentation().captureCount(SINK_MESSAGES_DROP_TOTAL, (long) contentStringList.size(), "cause= " + statusCode(response)); + getFirehoseInstrumentation().logInfo("Message dropped because of status code: " + statusCode(response)); } /** diff --git a/src/main/java/io/odpf/firehose/sink/prometheus/PromSinkFactory.java b/src/main/java/io/odpf/firehose/sink/prometheus/PromSinkFactory.java index f355b8867..22628d2e0 100644 --- a/src/main/java/io/odpf/firehose/sink/prometheus/PromSinkFactory.java +++ b/src/main/java/io/odpf/firehose/sink/prometheus/PromSinkFactory.java @@ -1,8 +1,8 @@ package io.odpf.firehose.sink.prometheus; +import io.odpf.depot.metrics.StatsDReporter; import io.odpf.firehose.config.PromSinkConfig; -import io.odpf.firehose.metrics.Instrumentation; -import io.odpf.firehose.metrics.StatsDReporter; +import io.odpf.firehose.metrics.FirehoseInstrumentation; import io.odpf.firehose.sink.AbstractSink; import io.odpf.firehose.sink.prometheus.request.PromRequest; import io.odpf.firehose.sink.prometheus.request.PromRequestCreator; @@ -39,16 +39,16 @@ public static AbstractSink create(Map configuration, StatsDRepor PromSinkConfig promSinkConfig = ConfigFactory.create(PromSinkConfig.class, configuration); String promSchemaProtoClass = promSinkConfig.getInputSchemaProtoClass(); - Instrumentation instrumentation = new Instrumentation(statsDReporter, PromSinkFactory.class); + FirehoseInstrumentation firehoseInstrumentation = new FirehoseInstrumentation(statsDReporter, PromSinkFactory.class); CloseableHttpClient closeableHttpClient = newHttpClient(promSinkConfig); - instrumentation.logInfo("HTTP connection established"); + firehoseInstrumentation.logInfo("HTTP connection established"); Parser protoParser = stencilClient.getParser(promSchemaProtoClass); PromRequest request = new PromRequestCreator(statsDReporter, promSinkConfig, protoParser).createRequest(); - return new PromSink(new Instrumentation(statsDReporter, PromSink.class), + return new PromSink(new FirehoseInstrumentation(statsDReporter, PromSink.class), request, closeableHttpClient, stencilClient, diff --git 
a/src/main/java/io/odpf/firehose/sink/prometheus/request/PromRequest.java b/src/main/java/io/odpf/firehose/sink/prometheus/request/PromRequest.java index f153db2ba..fb13d7851 100644 --- a/src/main/java/io/odpf/firehose/sink/prometheus/request/PromRequest.java +++ b/src/main/java/io/odpf/firehose/sink/prometheus/request/PromRequest.java @@ -3,7 +3,7 @@ import cortexpb.Cortex; import io.odpf.firehose.message.Message; import io.odpf.firehose.exception.DeserializerException; -import io.odpf.firehose.metrics.Instrumentation; +import io.odpf.firehose.metrics.FirehoseInstrumentation; import io.odpf.firehose.sink.prometheus.builder.HeaderBuilder; import io.odpf.firehose.sink.prometheus.builder.RequestEntityBuilder; import io.odpf.firehose.sink.prometheus.builder.WriteRequestBuilder; @@ -21,7 +21,7 @@ * Prometheus request create one HttpPost per batch messages. */ public class PromRequest { - private Instrumentation instrumentation; + private FirehoseInstrumentation firehoseInstrumentation; private WriteRequestBuilder writeRequestBuilder; private String url; private RequestEntityBuilder requestEntityBuilder; @@ -31,15 +31,15 @@ public class PromRequest { /** * Instantiates a new Prometheus request. * - * @param instrumentation the instrumentation + * @param firehoseInstrumentation the instrumentation * @param headerBuilder the header builder * @param url the url * @param requestEntityBuilder the request entity builder * @param writeRequestBuilder the writeRequest builder */ - public PromRequest(Instrumentation instrumentation, HeaderBuilder headerBuilder, String url, + public PromRequest(FirehoseInstrumentation firehoseInstrumentation, HeaderBuilder headerBuilder, String url, RequestEntityBuilder requestEntityBuilder, WriteRequestBuilder writeRequestBuilder) { - this.instrumentation = instrumentation; + this.firehoseInstrumentation = firehoseInstrumentation; this.writeRequestBuilder = writeRequestBuilder; this.headerBuilder = headerBuilder; this.url = url; diff --git a/src/main/java/io/odpf/firehose/sink/prometheus/request/PromRequestCreator.java b/src/main/java/io/odpf/firehose/sink/prometheus/request/PromRequestCreator.java index 6290847cc..35510fdc8 100644 --- a/src/main/java/io/odpf/firehose/sink/prometheus/request/PromRequestCreator.java +++ b/src/main/java/io/odpf/firehose/sink/prometheus/request/PromRequestCreator.java @@ -1,9 +1,9 @@ package io.odpf.firehose.sink.prometheus.request; +import io.odpf.depot.metrics.StatsDReporter; import io.odpf.firehose.config.PromSinkConfig; -import io.odpf.firehose.metrics.Instrumentation; -import io.odpf.firehose.metrics.StatsDReporter; +import io.odpf.firehose.metrics.FirehoseInstrumentation; import io.odpf.firehose.sink.prometheus.builder.HeaderBuilder; import io.odpf.firehose.sink.prometheus.builder.RequestEntityBuilder; import io.odpf.firehose.sink.prometheus.builder.TimeSeriesBuilder; @@ -22,9 +22,9 @@ public class PromRequestCreator { /** * Instantiates a new Prometheus request creator. 
* - * @param statsDReporter the statsd reporter - * @param promSinkConfig the configuration for prometheus sink - * @param protoParser the proto parser + * @param statsDReporter the statsd reporter + * @param promSinkConfig the configuration for prometheus sink + * @param protoParser the proto parser */ public PromRequestCreator(StatsDReporter statsDReporter, PromSinkConfig promSinkConfig, Parser protoParser) { this.statsDReporter = statsDReporter; @@ -43,7 +43,7 @@ public PromRequest createRequest() { String baseUrl = promSinkConfig.getSinkPromServiceUrl(); RequestEntityBuilder requestEntityBuilder = new RequestEntityBuilder(); - return new PromRequest(new Instrumentation(statsDReporter, PromRequest.class), + return new PromRequest(new FirehoseInstrumentation(statsDReporter, PromRequest.class), headerBuilder, baseUrl, requestEntityBuilder, body); } diff --git a/src/main/java/io/odpf/firehose/sink/redis/RedisSink.java b/src/main/java/io/odpf/firehose/sink/redis/RedisSink.java index 7e5f735e6..808631c1a 100644 --- a/src/main/java/io/odpf/firehose/sink/redis/RedisSink.java +++ b/src/main/java/io/odpf/firehose/sink/redis/RedisSink.java @@ -1,7 +1,7 @@ package io.odpf.firehose.sink.redis; import io.odpf.firehose.message.Message; -import io.odpf.firehose.metrics.Instrumentation; +import io.odpf.firehose.metrics.FirehoseInstrumentation; import io.odpf.firehose.sink.AbstractSink; import io.odpf.firehose.sink.redis.client.RedisClient; import io.odpf.firehose.sink.redis.exception.NoResponseException; @@ -19,12 +19,12 @@ public class RedisSink extends AbstractSink { /** * Instantiates a new Redis sink. * - * @param instrumentation the instrumentation + * @param firehoseInstrumentation the instrumentation * @param sinkType the sink type * @param redisClient the redis client */ - public RedisSink(Instrumentation instrumentation, String sinkType, RedisClient redisClient) { - super(instrumentation, sinkType); + public RedisSink(FirehoseInstrumentation firehoseInstrumentation, String sinkType, RedisClient redisClient) { + super(firehoseInstrumentation, sinkType); this.redisClient = redisClient; } @@ -51,7 +51,7 @@ protected List execute() throws NoResponseException { @Override public void close() { - getInstrumentation().logInfo("Redis connection closing"); + getFirehoseInstrumentation().logInfo("Redis connection closing"); redisClient.close(); } } diff --git a/src/main/java/io/odpf/firehose/sink/redis/RedisSinkFactory.java b/src/main/java/io/odpf/firehose/sink/redis/RedisSinkFactory.java index 1c53f6f9f..741fe695d 100644 --- a/src/main/java/io/odpf/firehose/sink/redis/RedisSinkFactory.java +++ b/src/main/java/io/odpf/firehose/sink/redis/RedisSinkFactory.java @@ -1,9 +1,9 @@ package io.odpf.firehose.sink.redis; +import io.odpf.depot.metrics.StatsDReporter; import io.odpf.firehose.config.RedisSinkConfig; -import io.odpf.firehose.metrics.Instrumentation; -import io.odpf.firehose.metrics.StatsDReporter; +import io.odpf.firehose.metrics.FirehoseInstrumentation; import io.odpf.firehose.sink.AbstractSink; import io.odpf.firehose.sink.redis.client.RedisClient; import io.odpf.firehose.sink.redis.client.RedisClientFactory; @@ -31,7 +31,7 @@ public class RedisSinkFactory { */ public static AbstractSink create(Map configuration, StatsDReporter statsDReporter, StencilClient stencilClient) { RedisSinkConfig redisSinkConfig = ConfigFactory.create(RedisSinkConfig.class, configuration); - Instrumentation instrumentation = new Instrumentation(statsDReporter, RedisSinkFactory.class); + FirehoseInstrumentation 
firehoseInstrumentation = new FirehoseInstrumentation(statsDReporter, RedisSinkFactory.class); String redisConfig = String.format("\n\tredis.urls = %s\n\tredis.key.template = %s\n\tredis.sink.type = %s" + "\n\tredis.list.data.proto.index = %s\n\tredis.ttl.type = %s\n\tredis.ttl.value = %d", redisSinkConfig.getSinkRedisUrls(), @@ -40,12 +40,12 @@ public static AbstractSink create(Map configuration, StatsDRepor redisSinkConfig.getSinkRedisListDataProtoIndex(), redisSinkConfig.getSinkRedisTtlType().toString(), redisSinkConfig.getSinkRedisTtlValue()); - instrumentation.logDebug(redisConfig); - instrumentation.logInfo("Redis server type = {}", redisSinkConfig.getSinkRedisDeploymentType()); + firehoseInstrumentation.logDebug(redisConfig); + firehoseInstrumentation.logInfo("Redis server type = {}", redisSinkConfig.getSinkRedisDeploymentType()); RedisClientFactory redisClientFactory = new RedisClientFactory(statsDReporter, redisSinkConfig, stencilClient); RedisClient client = redisClientFactory.getClient(); - instrumentation.logInfo("Connection to redis established successfully"); - return new RedisSink(new Instrumentation(statsDReporter, RedisSink.class), "redis", client); + firehoseInstrumentation.logInfo("Connection to redis established successfully"); + return new RedisSink(new FirehoseInstrumentation(statsDReporter, RedisSink.class), "redis", client); } } diff --git a/src/main/java/io/odpf/firehose/sink/redis/client/RedisClientFactory.java b/src/main/java/io/odpf/firehose/sink/redis/client/RedisClientFactory.java index 62604b21e..7d3fac23e 100644 --- a/src/main/java/io/odpf/firehose/sink/redis/client/RedisClientFactory.java +++ b/src/main/java/io/odpf/firehose/sink/redis/client/RedisClientFactory.java @@ -1,12 +1,10 @@ package io.odpf.firehose.sink.redis.client; - - +import io.odpf.depot.metrics.StatsDReporter; import io.odpf.firehose.config.RedisSinkConfig; import io.odpf.firehose.config.enums.RedisSinkDeploymentType; import io.odpf.firehose.exception.ConfigurationException; -import io.odpf.firehose.metrics.Instrumentation; -import io.odpf.firehose.metrics.StatsDReporter; +import io.odpf.firehose.metrics.FirehoseInstrumentation; import io.odpf.firehose.proto.ProtoToFieldMapper; import io.odpf.firehose.sink.redis.parsers.RedisParser; import io.odpf.firehose.sink.redis.parsers.RedisParserFactory; @@ -62,7 +60,7 @@ private RedisStandaloneClient getRedisStandaloneClient(RedisParser redisParser, } catch (IllegalArgumentException e) { throw new ConfigurationException(String.format("Invalid url for redis standalone: %s", redisSinkConfig.getSinkRedisUrls())); } - return new RedisStandaloneClient(new Instrumentation(statsDReporter, RedisStandaloneClient.class), redisParser, redisTTL, jedis); + return new RedisStandaloneClient(new FirehoseInstrumentation(statsDReporter, RedisStandaloneClient.class), redisParser, redisTTL, jedis); } private RedisClusterClient getRedisClusterClient(RedisParser redisParser, RedisTtl redisTTL) { @@ -76,6 +74,6 @@ private RedisClusterClient getRedisClusterClient(RedisParser redisParser, RedisT throw new ConfigurationException(String.format("Invalid url(s) for redis cluster: %s", redisSinkConfig.getSinkRedisUrls())); } JedisCluster jedisCluster = new JedisCluster(nodes); - return new RedisClusterClient(new Instrumentation(statsDReporter, RedisClusterClient.class), redisParser, redisTTL, jedisCluster); + return new RedisClusterClient(new FirehoseInstrumentation(statsDReporter, RedisClusterClient.class), redisParser, redisTTL, jedisCluster); } } diff --git 
a/src/main/java/io/odpf/firehose/sink/redis/client/RedisClusterClient.java b/src/main/java/io/odpf/firehose/sink/redis/client/RedisClusterClient.java index e1f187f5c..f72beb535 100644 --- a/src/main/java/io/odpf/firehose/sink/redis/client/RedisClusterClient.java +++ b/src/main/java/io/odpf/firehose/sink/redis/client/RedisClusterClient.java @@ -1,7 +1,7 @@ package io.odpf.firehose.sink.redis.client; import io.odpf.firehose.message.Message; -import io.odpf.firehose.metrics.Instrumentation; +import io.odpf.firehose.metrics.FirehoseInstrumentation; import io.odpf.firehose.sink.redis.dataentry.RedisDataEntry; import io.odpf.firehose.sink.redis.parsers.RedisParser; import io.odpf.firehose.sink.redis.ttl.RedisTtl; @@ -15,7 +15,7 @@ */ public class RedisClusterClient implements RedisClient { - private Instrumentation instrumentation; + private FirehoseInstrumentation firehoseInstrumentation; private RedisParser redisParser; private RedisTtl redisTTL; private JedisCluster jedisCluster; @@ -24,13 +24,13 @@ public class RedisClusterClient implements RedisClient { /** * Instantiates a new Redis cluster client. * - * @param instrumentation the instrumentation + * @param firehoseInstrumentation the instrumentation * @param redisParser the redis parser * @param redisTTL the redis ttl * @param jedisCluster the jedis cluster */ - public RedisClusterClient(Instrumentation instrumentation, RedisParser redisParser, RedisTtl redisTTL, JedisCluster jedisCluster) { - this.instrumentation = instrumentation; + public RedisClusterClient(FirehoseInstrumentation firehoseInstrumentation, RedisParser redisParser, RedisTtl redisTTL, JedisCluster jedisCluster) { + this.firehoseInstrumentation = firehoseInstrumentation; this.redisParser = redisParser; this.redisTTL = redisTTL; this.jedisCluster = jedisCluster; @@ -49,7 +49,7 @@ public List execute() { @Override public void close() { - instrumentation.logInfo("Closing Jedis client"); + firehoseInstrumentation.logInfo("Closing Jedis client"); jedisCluster.close(); } } diff --git a/src/main/java/io/odpf/firehose/sink/redis/client/RedisStandaloneClient.java b/src/main/java/io/odpf/firehose/sink/redis/client/RedisStandaloneClient.java index 81846b6df..ba6498467 100644 --- a/src/main/java/io/odpf/firehose/sink/redis/client/RedisStandaloneClient.java +++ b/src/main/java/io/odpf/firehose/sink/redis/client/RedisStandaloneClient.java @@ -1,7 +1,7 @@ package io.odpf.firehose.sink.redis.client; import io.odpf.firehose.message.Message; -import io.odpf.firehose.metrics.Instrumentation; +import io.odpf.firehose.metrics.FirehoseInstrumentation; import io.odpf.firehose.sink.redis.dataentry.RedisDataEntry; import io.odpf.firehose.sink.redis.exception.NoResponseException; import io.odpf.firehose.sink.redis.parsers.RedisParser; @@ -18,7 +18,7 @@ */ public class RedisStandaloneClient implements RedisClient { - private Instrumentation instrumentation; + private FirehoseInstrumentation firehoseInstrumentation; private RedisParser redisParser; private RedisTtl redisTTL; private Jedis jedis; @@ -27,13 +27,13 @@ public class RedisStandaloneClient implements RedisClient { /** * Instantiates a new Redis standalone client. 
* - * @param instrumentation the instrumentation + * @param firehoseInstrumentation the instrumentation * @param redisParser the redis parser * @param redisTTL the redis ttl * @param jedis the jedis */ - public RedisStandaloneClient(Instrumentation instrumentation, RedisParser redisParser, RedisTtl redisTTL, Jedis jedis) { - this.instrumentation = instrumentation; + public RedisStandaloneClient(FirehoseInstrumentation firehoseInstrumentation, RedisParser redisParser, RedisTtl redisTTL, Jedis jedis) { + this.firehoseInstrumentation = firehoseInstrumentation; this.redisParser = redisParser; this.redisTTL = redisTTL; this.jedis = jedis; @@ -51,7 +51,7 @@ public void prepare(List messages) { @Override public List execute() { Response> responses = jedisPipelined.exec(); - instrumentation.logDebug("jedis responses: {}", responses); + firehoseInstrumentation.logDebug("jedis responses: {}", responses); jedisPipelined.sync(); if (responses.get() == null || responses.get().isEmpty()) { throw new NoResponseException(); @@ -61,7 +61,7 @@ public List execute() { @Override public void close() { - instrumentation.logInfo("Closing Jedis client"); + firehoseInstrumentation.logInfo("Closing Jedis client"); jedis.close(); } } diff --git a/src/main/java/io/odpf/firehose/sink/redis/dataentry/RedisHashSetFieldEntry.java b/src/main/java/io/odpf/firehose/sink/redis/dataentry/RedisHashSetFieldEntry.java index 2fa5f3e28..fac75fc9e 100644 --- a/src/main/java/io/odpf/firehose/sink/redis/dataentry/RedisHashSetFieldEntry.java +++ b/src/main/java/io/odpf/firehose/sink/redis/dataentry/RedisHashSetFieldEntry.java @@ -1,6 +1,6 @@ package io.odpf.firehose.sink.redis.dataentry; -import io.odpf.firehose.metrics.Instrumentation; +import io.odpf.firehose.metrics.FirehoseInstrumentation; import io.odpf.firehose.sink.redis.ttl.RedisTtl; import lombok.AllArgsConstructor; import lombok.Getter; @@ -17,18 +17,18 @@ public class RedisHashSetFieldEntry implements RedisDataEntry { private String key; private String field; private String value; - private Instrumentation instrumentation; + private FirehoseInstrumentation firehoseInstrumentation; @Override public void pushMessage(Pipeline jedisPipelined, RedisTtl redisTTL) { - getInstrumentation().logDebug("key: {}, field: {}, value: {}", getKey(), getField(), getValue()); + getFirehoseInstrumentation().logDebug("key: {}, field: {}, value: {}", getKey(), getField(), getValue()); jedisPipelined.hset(getKey(), getField(), getValue()); redisTTL.setTtl(jedisPipelined, getKey()); } @Override public void pushMessage(JedisCluster jedisCluster, RedisTtl redisTTL) { - getInstrumentation().logDebug("key: {}, field: {}, value: {}", getKey(), getField(), getValue()); + getFirehoseInstrumentation().logDebug("key: {}, field: {}, value: {}", getKey(), getField(), getValue()); jedisCluster.hset(getKey(), getField(), getValue()); redisTTL.setTtl(jedisCluster, getKey()); } diff --git a/src/main/java/io/odpf/firehose/sink/redis/dataentry/RedisKeyValueEntry.java b/src/main/java/io/odpf/firehose/sink/redis/dataentry/RedisKeyValueEntry.java index 62ecaf5a6..18d23f059 100644 --- a/src/main/java/io/odpf/firehose/sink/redis/dataentry/RedisKeyValueEntry.java +++ b/src/main/java/io/odpf/firehose/sink/redis/dataentry/RedisKeyValueEntry.java @@ -1,6 +1,6 @@ package io.odpf.firehose.sink.redis.dataentry; -import io.odpf.firehose.metrics.Instrumentation; +import io.odpf.firehose.metrics.FirehoseInstrumentation; import io.odpf.firehose.sink.redis.ttl.RedisTtl; import lombok.AllArgsConstructor; import 
lombok.EqualsAndHashCode; @@ -10,23 +10,23 @@ @AllArgsConstructor @Getter -@EqualsAndHashCode +@EqualsAndHashCode(exclude = "firehoseInstrumentation") public class RedisKeyValueEntry implements RedisDataEntry { private String key; private String value; - @EqualsAndHashCode.Exclude private Instrumentation instrumentation; + private FirehoseInstrumentation firehoseInstrumentation; @Override public void pushMessage(Pipeline jedisPipelined, RedisTtl redisTTL) { - instrumentation.logDebug("key: {}, value: {}", key, value); + firehoseInstrumentation.logDebug("key: {}, value: {}", key, value); jedisPipelined.set(key, value); redisTTL.setTtl(jedisPipelined, key); } @Override public void pushMessage(JedisCluster jedisCluster, RedisTtl redisTTL) { - instrumentation.logDebug("key: {}, value: {}", key, value); + firehoseInstrumentation.logDebug("key: {}, value: {}", key, value); jedisCluster.set(key, value); redisTTL.setTtl(jedisCluster, key); diff --git a/src/main/java/io/odpf/firehose/sink/redis/dataentry/RedisListEntry.java b/src/main/java/io/odpf/firehose/sink/redis/dataentry/RedisListEntry.java index f63fbf284..c6c9ee163 100644 --- a/src/main/java/io/odpf/firehose/sink/redis/dataentry/RedisListEntry.java +++ b/src/main/java/io/odpf/firehose/sink/redis/dataentry/RedisListEntry.java @@ -1,6 +1,6 @@ package io.odpf.firehose.sink.redis.dataentry; -import io.odpf.firehose.metrics.Instrumentation; +import io.odpf.firehose.metrics.FirehoseInstrumentation; import io.odpf.firehose.sink.redis.ttl.RedisTtl; import lombok.AllArgsConstructor; import lombok.Getter; @@ -15,18 +15,18 @@ public class RedisListEntry implements RedisDataEntry { private String key; private String value; - private Instrumentation instrumentation; + private FirehoseInstrumentation firehoseInstrumentation; @Override public void pushMessage(Pipeline jedisPipelined, RedisTtl redisTTL) { - getInstrumentation().logDebug("key: {}, value: {}", getKey(), getValue()); + getFirehoseInstrumentation().logDebug("key: {}, value: {}", getKey(), getValue()); jedisPipelined.lpush(getKey(), getValue()); redisTTL.setTtl(jedisPipelined, getKey()); } @Override public void pushMessage(JedisCluster jedisCluster, RedisTtl redisTTL) { - getInstrumentation().logDebug("key: {}, value: {}", getKey(), getValue()); + getFirehoseInstrumentation().logDebug("key: {}, value: {}", getKey(), getValue()); jedisCluster.lpush(getKey(), getValue()); redisTTL.setTtl(jedisCluster, getKey()); } diff --git a/src/main/java/io/odpf/firehose/sink/redis/parsers/RedisHashSetParser.java b/src/main/java/io/odpf/firehose/sink/redis/parsers/RedisHashSetParser.java index 6447e169a..3478e922f 100644 --- a/src/main/java/io/odpf/firehose/sink/redis/parsers/RedisHashSetParser.java +++ b/src/main/java/io/odpf/firehose/sink/redis/parsers/RedisHashSetParser.java @@ -1,10 +1,9 @@ package io.odpf.firehose.sink.redis.parsers; - +import io.odpf.depot.metrics.StatsDReporter; import io.odpf.firehose.config.RedisSinkConfig; import io.odpf.firehose.message.Message; -import io.odpf.firehose.metrics.Instrumentation; -import io.odpf.firehose.metrics.StatsDReporter; +import io.odpf.firehose.metrics.FirehoseInstrumentation; import io.odpf.firehose.proto.ProtoToFieldMapper; import io.odpf.firehose.sink.redis.dataentry.RedisDataEntry; import io.odpf.firehose.sink.redis.dataentry.RedisHashSetFieldEntry; @@ -43,7 +42,7 @@ public List parse(Message message) { String redisKey = parseTemplate(parsedMessage, redisSinkConfig.getSinkRedisKeyTemplate()); List messageEntries = new ArrayList<>(); Map protoToFieldMap = 
protoToFieldMapper.getFields(getPayload(message)); - protoToFieldMap.forEach((key, value) -> messageEntries.add(new RedisHashSetFieldEntry(redisKey, parseTemplate(parsedMessage, key), String.valueOf(value), new Instrumentation(statsDReporter, RedisHashSetFieldEntry.class)))); + protoToFieldMap.forEach((key, value) -> messageEntries.add(new RedisHashSetFieldEntry(redisKey, parseTemplate(parsedMessage, key), String.valueOf(value), new FirehoseInstrumentation(statsDReporter, RedisHashSetFieldEntry.class)))); return messageEntries; } } diff --git a/src/main/java/io/odpf/firehose/sink/redis/parsers/RedisKeyValueParser.java b/src/main/java/io/odpf/firehose/sink/redis/parsers/RedisKeyValueParser.java index b68b18999..f5447fe78 100644 --- a/src/main/java/io/odpf/firehose/sink/redis/parsers/RedisKeyValueParser.java +++ b/src/main/java/io/odpf/firehose/sink/redis/parsers/RedisKeyValueParser.java @@ -1,10 +1,10 @@ package io.odpf.firehose.sink.redis.parsers; import com.google.protobuf.DynamicMessage; +import io.odpf.depot.metrics.StatsDReporter; import io.odpf.firehose.config.RedisSinkConfig; import io.odpf.firehose.message.Message; -import io.odpf.firehose.metrics.Instrumentation; -import io.odpf.firehose.metrics.StatsDReporter; +import io.odpf.firehose.metrics.FirehoseInstrumentation; import io.odpf.firehose.sink.redis.dataentry.RedisDataEntry; import io.odpf.firehose.sink.redis.dataentry.RedisKeyValueEntry; import io.odpf.stencil.Parser; @@ -30,8 +30,8 @@ public List parse(Message message) { if (protoIndex == null) { throw new IllegalArgumentException("Please provide SINK_REDIS_KEY_VALUE_DATA_PROTO_INDEX in key value sink"); } - Instrumentation instrumentation = new Instrumentation(statsDReporter, RedisKeyValueEntry.class); - RedisKeyValueEntry redisKeyValueEntry = new RedisKeyValueEntry(redisKey, getDataByFieldNumber(parsedMessage, protoIndex).toString(), instrumentation); + FirehoseInstrumentation firehoseInstrumentation = new FirehoseInstrumentation(statsDReporter, RedisKeyValueEntry.class); + RedisKeyValueEntry redisKeyValueEntry = new RedisKeyValueEntry(redisKey, getDataByFieldNumber(parsedMessage, protoIndex).toString(), firehoseInstrumentation); return Collections.singletonList(redisKeyValueEntry); } } diff --git a/src/main/java/io/odpf/firehose/sink/redis/parsers/RedisListParser.java b/src/main/java/io/odpf/firehose/sink/redis/parsers/RedisListParser.java index 4f3c3bbe1..760c26384 100644 --- a/src/main/java/io/odpf/firehose/sink/redis/parsers/RedisListParser.java +++ b/src/main/java/io/odpf/firehose/sink/redis/parsers/RedisListParser.java @@ -1,10 +1,10 @@ package io.odpf.firehose.sink.redis.parsers; +import io.odpf.depot.metrics.StatsDReporter; import io.odpf.firehose.config.RedisSinkConfig; import io.odpf.firehose.message.Message; -import io.odpf.firehose.metrics.Instrumentation; -import io.odpf.firehose.metrics.StatsDReporter; +import io.odpf.firehose.metrics.FirehoseInstrumentation; import io.odpf.firehose.sink.redis.dataentry.RedisDataEntry; import io.odpf.firehose.sink.redis.dataentry.RedisListEntry; import com.google.protobuf.DynamicMessage; @@ -42,7 +42,7 @@ public List parse(Message message) { throw new IllegalArgumentException("Please provide SINK_REDIS_LIST_DATA_PROTO_INDEX in list sink"); } List messageEntries = new ArrayList<>(); - messageEntries.add(new RedisListEntry(redisKey, getDataByFieldNumber(parsedMessage, protoIndex).toString(), new Instrumentation(statsDReporter, RedisListEntry.class))); + messageEntries.add(new RedisListEntry(redisKey, 
getDataByFieldNumber(parsedMessage, protoIndex).toString(), new FirehoseInstrumentation(statsDReporter, RedisListEntry.class))); return messageEntries; } } diff --git a/src/main/java/io/odpf/firehose/sink/redis/parsers/RedisParserFactory.java b/src/main/java/io/odpf/firehose/sink/redis/parsers/RedisParserFactory.java index 6c9fca79f..edfb62e79 100644 --- a/src/main/java/io/odpf/firehose/sink/redis/parsers/RedisParserFactory.java +++ b/src/main/java/io/odpf/firehose/sink/redis/parsers/RedisParserFactory.java @@ -1,8 +1,7 @@ package io.odpf.firehose.sink.redis.parsers; - +import io.odpf.depot.metrics.StatsDReporter; import io.odpf.firehose.config.RedisSinkConfig; -import io.odpf.firehose.metrics.StatsDReporter; import io.odpf.firehose.proto.ProtoToFieldMapper; import io.odpf.stencil.Parser; diff --git a/src/main/java/io/odpf/firehose/sinkdecorator/BackOff.java b/src/main/java/io/odpf/firehose/sinkdecorator/BackOff.java index cd4a17fb5..0d157d47d 100644 --- a/src/main/java/io/odpf/firehose/sinkdecorator/BackOff.java +++ b/src/main/java/io/odpf/firehose/sinkdecorator/BackOff.java @@ -1,20 +1,20 @@ package io.odpf.firehose.sinkdecorator; -import io.odpf.firehose.metrics.Instrumentation; +import io.odpf.firehose.metrics.FirehoseInstrumentation; import lombok.AllArgsConstructor; @AllArgsConstructor public class BackOff { - private Instrumentation instrumentation; + private FirehoseInstrumentation firehoseInstrumentation; public void inMilliSeconds(long milliseconds) { try { Thread.sleep(milliseconds); } catch (InterruptedException e) { - instrumentation.captureNonFatalError(e, "Backoff thread sleep for {} milliseconds interrupted : {} {}", - milliseconds, e.getClass(), e.getMessage()); + firehoseInstrumentation.captureNonFatalError("firehose_error_event", e, "Backoff thread sleep for {} milliseconds interrupted : {} {}", + milliseconds, e.getClass(), e.getMessage()); } } } diff --git a/src/main/java/io/odpf/firehose/sinkdecorator/ExponentialBackOffProvider.java b/src/main/java/io/odpf/firehose/sinkdecorator/ExponentialBackOffProvider.java index 6d9a1c771..080f958f4 100644 --- a/src/main/java/io/odpf/firehose/sinkdecorator/ExponentialBackOffProvider.java +++ b/src/main/java/io/odpf/firehose/sinkdecorator/ExponentialBackOffProvider.java @@ -1,6 +1,6 @@ package io.odpf.firehose.sinkdecorator; -import io.odpf.firehose.metrics.Instrumentation; +import io.odpf.firehose.metrics.FirehoseInstrumentation; import static io.odpf.firehose.metrics.Metrics.RETRY_SLEEP_TIME_MILLISECONDS; import static java.lang.Math.toIntExact; @@ -17,7 +17,7 @@ public class ExponentialBackOffProvider implements BackOffProvider { private final int initialExpiryTimeInMs; private final int backoffRate; private final int maximumExpiryTimeInMS; - private Instrumentation instrumentation; + private FirehoseInstrumentation firehoseInstrumentation; private final BackOff backOff; /** @@ -26,23 +26,23 @@ public class ExponentialBackOffProvider implements BackOffProvider { * @param initialExpiryTimeInMs the initial expiry time in ms * @param backoffRate the backoff rate * @param maximumExpiryTimeInMS the maximum expiry time in ms - * @param instrumentation the instrumentation + * @param firehoseInstrumentation the instrumentation * @param backOff the back off */ public ExponentialBackOffProvider(int initialExpiryTimeInMs, int backoffRate, int maximumExpiryTimeInMS, - Instrumentation instrumentation, BackOff backOff) { + FirehoseInstrumentation firehoseInstrumentation, BackOff backOff) { this.initialExpiryTimeInMs = initialExpiryTimeInMs; 
this.backoffRate = backoffRate; this.maximumExpiryTimeInMS = maximumExpiryTimeInMS; - this.instrumentation = instrumentation; + this.firehoseInstrumentation = firehoseInstrumentation; this.backOff = backOff; } @Override public void backOff(int attemptCount) { long sleepTime = this.calculateDelay(attemptCount); - instrumentation.logWarn("backing off for {} milliseconds ", sleepTime); - instrumentation.captureSleepTime(RETRY_SLEEP_TIME_MILLISECONDS, toIntExact(sleepTime)); + firehoseInstrumentation.logWarn("backing off for {} milliseconds ", sleepTime); + firehoseInstrumentation.captureSleepTime(RETRY_SLEEP_TIME_MILLISECONDS, toIntExact(sleepTime)); backOff.inMilliSeconds(sleepTime); } diff --git a/src/main/java/io/odpf/firehose/sinkdecorator/SinkFinal.java b/src/main/java/io/odpf/firehose/sinkdecorator/SinkFinal.java index d00784d91..65a62f3b2 100644 --- a/src/main/java/io/odpf/firehose/sinkdecorator/SinkFinal.java +++ b/src/main/java/io/odpf/firehose/sinkdecorator/SinkFinal.java @@ -2,7 +2,7 @@ import io.odpf.firehose.message.Message; import io.odpf.firehose.exception.DeserializerException; -import io.odpf.firehose.metrics.Instrumentation; +import io.odpf.firehose.metrics.FirehoseInstrumentation; import io.odpf.firehose.metrics.Metrics; import io.odpf.firehose.sink.Sink; @@ -10,7 +10,7 @@ import java.util.List; public class SinkFinal extends SinkDecorator { - private final Instrumentation instrumentation; + private final FirehoseInstrumentation firehoseInstrumentation; /** * Instantiates a new Sink decorator. @@ -18,17 +18,17 @@ public class SinkFinal extends SinkDecorator { * @param sink wrapped sink object */ - public SinkFinal(Sink sink, Instrumentation instrumentation) { + public SinkFinal(Sink sink, FirehoseInstrumentation firehoseInstrumentation) { super(sink); - this.instrumentation = instrumentation; + this.firehoseInstrumentation = firehoseInstrumentation; } @Override public List pushMessage(List inputMessages) throws IOException, DeserializerException { List failedMessages = super.pushMessage(inputMessages); if (failedMessages.size() > 0) { - instrumentation.logInfo("Ignoring messages {}", failedMessages.size()); - instrumentation.captureGlobalMessageMetrics(Metrics.MessageScope.IGNORED, failedMessages.size()); + firehoseInstrumentation.logInfo("Ignoring messages {}", failedMessages.size()); + firehoseInstrumentation.captureGlobalMessageMetrics(Metrics.MessageScope.IGNORED, failedMessages.size()); } return failedMessages; } diff --git a/src/main/java/io/odpf/firehose/sinkdecorator/SinkWithDlq.java b/src/main/java/io/odpf/firehose/sinkdecorator/SinkWithDlq.java index b396d49bd..e7db335ed 100644 --- a/src/main/java/io/odpf/firehose/sinkdecorator/SinkWithDlq.java +++ b/src/main/java/io/odpf/firehose/sinkdecorator/SinkWithDlq.java @@ -5,7 +5,7 @@ import io.odpf.firehose.error.ErrorHandler; import io.odpf.firehose.error.ErrorScope; import io.odpf.firehose.exception.DeserializerException; -import io.odpf.firehose.metrics.Instrumentation; +import io.odpf.firehose.metrics.FirehoseInstrumentation; import io.odpf.firehose.metrics.Metrics; import io.odpf.firehose.sink.Sink; import io.odpf.firehose.sink.dlq.DlqWriter; @@ -30,14 +30,14 @@ public class SinkWithDlq extends SinkDecorator { private final DlqConfig dlqConfig; private final ErrorHandler errorHandler; - private final Instrumentation instrumentation; + private final FirehoseInstrumentation firehoseInstrumentation; - public SinkWithDlq(Sink sink, DlqWriter writer, BackOffProvider backOffProvider, DlqConfig dlqConfig, ErrorHandler 
errorHandler, Instrumentation instrumentation) { + public SinkWithDlq(Sink sink, DlqWriter writer, BackOffProvider backOffProvider, DlqConfig dlqConfig, ErrorHandler errorHandler, FirehoseInstrumentation firehoseInstrumentation) { super(sink); this.writer = writer; this.backOffProvider = backOffProvider; this.errorHandler = errorHandler; - this.instrumentation = instrumentation; + this.firehoseInstrumentation = firehoseInstrumentation; this.dlqConfig = dlqConfig; } @@ -78,24 +78,24 @@ private List doDLQ(List messages) throws IOException { List retryQueueMessages = new LinkedList<>(messages); retryQueueMessages.forEach(m -> { m.setDefaultErrorIfNotPresent(); - instrumentation.captureMessageMetrics(DLQ_MESSAGES_TOTAL, Metrics.MessageType.TOTAL, m.getErrorInfo().getErrorType(), 1); + firehoseInstrumentation.captureMessageMetrics(DLQ_MESSAGES_TOTAL, Metrics.MessageType.TOTAL, m.getErrorInfo().getErrorType(), 1); }); int attemptCount = 1; while (attemptCount <= this.dlqConfig.getDlqRetryMaxAttempts() && !retryQueueMessages.isEmpty()) { - instrumentation.incrementCounter(DLQ_RETRY_ATTEMPTS_TOTAL); + firehoseInstrumentation.incrementCounter(DLQ_RETRY_ATTEMPTS_TOTAL); retryQueueMessages = writer.write(retryQueueMessages); retryQueueMessages.forEach(message -> Optional.ofNullable(message.getErrorInfo()) .flatMap(errorInfo -> Optional.ofNullable(errorInfo.getException())) - .ifPresent(e -> instrumentation.captureDLQErrors(message, e))); + .ifPresent(e -> firehoseInstrumentation.captureDLQErrors(message, e))); backOff(retryQueueMessages, attemptCount); attemptCount++; } if (!retryQueueMessages.isEmpty()) { - instrumentation.logInfo("failed to be processed by DLQ messages: {}", retryQueueMessages.size()); + firehoseInstrumentation.logInfo("failed to be processed by DLQ messages: {}", retryQueueMessages.size()); } - instrumentation.captureMessageMetrics(DLQ_MESSAGES_TOTAL, Metrics.MessageType.SUCCESS, messages.size() - retryQueueMessages.size()); - retryQueueMessages.forEach(m -> instrumentation.captureMessageMetrics(DLQ_MESSAGES_TOTAL, Metrics.MessageType.FAILURE, m.getErrorInfo().getErrorType(), 1)); - instrumentation.captureGlobalMessageMetrics(Metrics.MessageScope.DLQ, messages.size() - retryQueueMessages.size()); + firehoseInstrumentation.captureMessageMetrics(DLQ_MESSAGES_TOTAL, Metrics.MessageType.SUCCESS, messages.size() - retryQueueMessages.size()); + retryQueueMessages.forEach(m -> firehoseInstrumentation.captureMessageMetrics(DLQ_MESSAGES_TOTAL, Metrics.MessageType.FAILURE, m.getErrorInfo().getErrorType(), 1)); + firehoseInstrumentation.captureGlobalMessageMetrics(Metrics.MessageScope.DLQ, messages.size() - retryQueueMessages.size()); return retryQueueMessages; } diff --git a/src/main/java/io/odpf/firehose/sinkdecorator/SinkWithRetry.java b/src/main/java/io/odpf/firehose/sinkdecorator/SinkWithRetry.java index 0681bc87b..60332800c 100644 --- a/src/main/java/io/odpf/firehose/sinkdecorator/SinkWithRetry.java +++ b/src/main/java/io/odpf/firehose/sinkdecorator/SinkWithRetry.java @@ -6,10 +6,10 @@ import io.odpf.firehose.error.ErrorHandler; import io.odpf.firehose.error.ErrorScope; import io.odpf.firehose.exception.DeserializerException; -import io.odpf.firehose.metrics.Instrumentation; +import io.odpf.firehose.metrics.FirehoseInstrumentation; import io.odpf.firehose.metrics.Metrics; import io.odpf.firehose.sink.Sink; -import io.odpf.firehose.sink.log.KeyOrMessageParser; +import io.odpf.firehose.sink.common.KeyOrMessageParser; import java.io.IOException; import java.util.ArrayList; @@ -26,15 
+26,15 @@ public class SinkWithRetry extends SinkDecorator { private final BackOffProvider backOffProvider; - private final Instrumentation instrumentation; + private final FirehoseInstrumentation firehoseInstrumentation; private final AppConfig appConfig; private final KeyOrMessageParser parser; private final ErrorHandler errorHandler; - public SinkWithRetry(Sink sink, BackOffProvider backOffProvider, Instrumentation instrumentation, AppConfig appConfig, KeyOrMessageParser parser, ErrorHandler errorHandler) { + public SinkWithRetry(Sink sink, BackOffProvider backOffProvider, FirehoseInstrumentation firehoseInstrumentation, AppConfig appConfig, KeyOrMessageParser parser, ErrorHandler errorHandler) { super(sink); this.backOffProvider = backOffProvider; - this.instrumentation = instrumentation; + this.firehoseInstrumentation = firehoseInstrumentation; this.appConfig = appConfig; this.parser = parser; this.errorHandler = errorHandler; @@ -64,12 +64,12 @@ public List pushMessage(List inputMessages) throws IOException } private void logDebug(List messageList) throws IOException { - if (instrumentation.isDebugEnabled()) { + if (firehoseInstrumentation.isDebugEnabled()) { List serializedBody = new ArrayList<>(); for (Message message : messageList) { serializedBody.add(parser.parse(message)); } - instrumentation.logDebug("Retry failed messages: \n{}", serializedBody.toString()); + firehoseInstrumentation.logDebug("Retry failed messages: \n{}", serializedBody.toString()); } } @@ -82,24 +82,24 @@ private void backOff(List messageList, int attemptCount) { private List doRetry(List messages) throws IOException { List retryMessages = new LinkedList<>(messages); - instrumentation.logInfo("Maximum retry attempts: {}", appConfig.getRetryMaxAttempts()); + firehoseInstrumentation.logInfo("Maximum retry attempts: {}", appConfig.getRetryMaxAttempts()); retryMessages.forEach(m -> { m.setDefaultErrorIfNotPresent(); - instrumentation.captureMessageMetrics(RETRY_MESSAGES_TOTAL, Metrics.MessageType.TOTAL, m.getErrorInfo().getErrorType(), 1); + firehoseInstrumentation.captureMessageMetrics(RETRY_MESSAGES_TOTAL, Metrics.MessageType.TOTAL, m.getErrorInfo().getErrorType(), 1); }); int attemptCount = 1; while ((attemptCount <= appConfig.getRetryMaxAttempts() && !retryMessages.isEmpty()) || (appConfig.getRetryMaxAttempts() == Integer.MAX_VALUE && !retryMessages.isEmpty())) { - instrumentation.incrementCounter(RETRY_ATTEMPTS_TOTAL); - instrumentation.logInfo("Retrying messages attempt count: {}, Number of messages: {}", attemptCount, messages.size()); + firehoseInstrumentation.incrementCounter(RETRY_ATTEMPTS_TOTAL); + firehoseInstrumentation.logInfo("Retrying messages attempt count: {}, Number of messages: {}", attemptCount, messages.size()); logDebug(retryMessages); retryMessages = super.pushMessage(retryMessages); backOff(retryMessages, attemptCount); attemptCount++; } - instrumentation.captureMessageMetrics(RETRY_MESSAGES_TOTAL, Metrics.MessageType.SUCCESS, messages.size() - retryMessages.size()); - retryMessages.forEach(m -> instrumentation.captureMessageMetrics(RETRY_MESSAGES_TOTAL, Metrics.MessageType.FAILURE, m.getErrorInfo().getErrorType(), 1)); + firehoseInstrumentation.captureMessageMetrics(RETRY_MESSAGES_TOTAL, Metrics.MessageType.SUCCESS, messages.size() - retryMessages.size()); + retryMessages.forEach(m -> firehoseInstrumentation.captureMessageMetrics(RETRY_MESSAGES_TOTAL, Metrics.MessageType.FAILURE, m.getErrorInfo().getErrorType(), 1)); return retryMessages; } diff --git 
a/src/main/java/io/odpf/firehose/utils/ConsumerRebalancer.java b/src/main/java/io/odpf/firehose/utils/ConsumerRebalancer.java index 19b3035cd..44b5998ba 100644 --- a/src/main/java/io/odpf/firehose/utils/ConsumerRebalancer.java +++ b/src/main/java/io/odpf/firehose/utils/ConsumerRebalancer.java @@ -1,6 +1,6 @@ package io.odpf.firehose.utils; -import io.odpf.firehose.metrics.Instrumentation; +import io.odpf.firehose.metrics.FirehoseInstrumentation; import lombok.AllArgsConstructor; import org.apache.kafka.clients.consumer.ConsumerRebalanceListener; import org.apache.kafka.common.TopicPartition; @@ -14,7 +14,7 @@ @AllArgsConstructor public class ConsumerRebalancer implements ConsumerRebalanceListener { - private Instrumentation instrumentation; + private FirehoseInstrumentation firehoseInstrumentation; /** * Function to run On partitions revoked. @@ -23,7 +23,7 @@ public class ConsumerRebalancer implements ConsumerRebalanceListener { */ @Override public void onPartitionsRevoked(Collection partitions) { - instrumentation.logWarn("Partitions Revoked {}", Arrays.toString(partitions.toArray())); + firehoseInstrumentation.logWarn("Partitions Revoked {}", Arrays.toString(partitions.toArray())); } /** @@ -33,6 +33,6 @@ public void onPartitionsRevoked(Collection partitions) { */ @Override public void onPartitionsAssigned(Collection partitions) { - instrumentation.logInfo("Partitions Assigned {}", Arrays.toString(partitions.toArray())); + firehoseInstrumentation.logInfo("Partitions Assigned {}", Arrays.toString(partitions.toArray())); } } diff --git a/src/main/java/io/odpf/firehose/utils/KafkaUtils.java b/src/main/java/io/odpf/firehose/utils/KafkaUtils.java index b214a064e..4654e5751 100644 --- a/src/main/java/io/odpf/firehose/utils/KafkaUtils.java +++ b/src/main/java/io/odpf/firehose/utils/KafkaUtils.java @@ -1,10 +1,10 @@ package io.odpf.firehose.utils; +import io.odpf.depot.metrics.StatsDReporter; import io.odpf.firehose.config.DlqKafkaProducerConfig; import io.odpf.firehose.config.KafkaConsumerConfig; import io.odpf.firehose.consumer.kafka.FirehoseKafkaConsumer; -import io.odpf.firehose.metrics.Instrumentation; -import io.odpf.firehose.metrics.StatsDReporter; +import io.odpf.firehose.metrics.FirehoseInstrumentation; import io.odpf.firehose.parser.KafkaEnvironmentVariables; import io.opentracing.Tracer; import io.opentracing.contrib.kafka.TracingKafkaConsumer; @@ -40,10 +40,10 @@ public class KafkaUtils { * @param statsdReporter the statsd reporter */ public static void configureSubscription(KafkaConsumerConfig config, KafkaConsumer kafkaConsumer, StatsDReporter statsdReporter) { - Instrumentation instrumentation = new Instrumentation(statsdReporter, KafkaUtils.class); + FirehoseInstrumentation firehoseInstrumentation = new FirehoseInstrumentation(statsdReporter, KafkaUtils.class); Pattern subscriptionTopicPattern = Pattern.compile(config.getSourceKafkaTopic()); - instrumentation.logInfo("consumer subscribed using pattern: {}", subscriptionTopicPattern); - kafkaConsumer.subscribe(subscriptionTopicPattern, new ConsumerRebalancer(new Instrumentation(statsdReporter, ConsumerRebalancer.class))); + firehoseInstrumentation.logInfo("consumer subscribed using pattern: {}", subscriptionTopicPattern); + kafkaConsumer.subscribe(subscriptionTopicPattern, new ConsumerRebalancer(new FirehoseInstrumentation(statsdReporter, ConsumerRebalancer.class))); } public static Map getConfig(KafkaConsumerConfig config, Map extraParameters) { @@ -83,7 +83,7 @@ public static FirehoseKafkaConsumer 
createConsumer(KafkaConsumerConfig config, M return new FirehoseKafkaConsumer( tracingKafkaConsumer, config, - new Instrumentation(statsDReporter, FirehoseKafkaConsumer.class)); + new FirehoseInstrumentation(statsDReporter, FirehoseKafkaConsumer.class)); } /** diff --git a/src/test/java/io/odpf/firehose/config/HttpSinkDataFormatTypeConverterTest.java b/src/test/java/io/odpf/firehose/config/HttpSinkDataFormatTypeConverterTest.java index c15274b7a..50fdda264 100644 --- a/src/test/java/io/odpf/firehose/config/HttpSinkDataFormatTypeConverterTest.java +++ b/src/test/java/io/odpf/firehose/config/HttpSinkDataFormatTypeConverterTest.java @@ -6,7 +6,7 @@ import org.junit.Before; import org.junit.Test; import org.junit.runner.RunWith; -import org.mockito.runners.MockitoJUnitRunner; +import org.mockito.junit.MockitoJUnitRunner; @RunWith(MockitoJUnitRunner.class) public class HttpSinkDataFormatTypeConverterTest { diff --git a/src/test/java/io/odpf/firehose/consumer/ConsumerAndOffsetManagerTest.java b/src/test/java/io/odpf/firehose/consumer/ConsumerAndOffsetManagerTest.java index e86a30edf..4b1b6c740 100644 --- a/src/test/java/io/odpf/firehose/consumer/ConsumerAndOffsetManagerTest.java +++ b/src/test/java/io/odpf/firehose/consumer/ConsumerAndOffsetManagerTest.java @@ -5,7 +5,7 @@ import io.odpf.firehose.consumer.kafka.FirehoseKafkaConsumer; import io.odpf.firehose.consumer.kafka.OffsetManager; import io.odpf.firehose.message.Message; -import io.odpf.firehose.metrics.Instrumentation; +import io.odpf.firehose.metrics.FirehoseInstrumentation; import io.odpf.firehose.sink.Sink; import org.aeonbits.owner.ConfigFactory; import org.apache.kafka.clients.consumer.OffsetAndMetadata; @@ -36,9 +36,9 @@ public void shouldCommitToKafka() { }}; FirehoseKafkaConsumer consumer = Mockito.mock(FirehoseKafkaConsumer.class); KafkaConsumerConfig config = ConfigFactory.create(KafkaConsumerConfig.class, new HashMap<>()); - Instrumentation instrumentation = Mockito.mock(Instrumentation.class); + FirehoseInstrumentation firehoseInstrumentation = Mockito.mock(FirehoseInstrumentation.class); OffsetManager offsetManager = new OffsetManager(); - ConsumerAndOffsetManager consumerAndOffsetManager = new ConsumerAndOffsetManager(sinks, offsetManager, consumer, config, instrumentation); + ConsumerAndOffsetManager consumerAndOffsetManager = new ConsumerAndOffsetManager(sinks, offsetManager, consumer, config, firehoseInstrumentation); List messages = new ArrayList() {{ add(createMessage("testing", 1, 1)); add(createMessage("testing", 1, 2)); @@ -69,7 +69,7 @@ public void shouldCommitToKafkaWithSinkManagesOwnOffsets() { add(createMessage("testing3", 1, 3)); }}; OffsetManager offsetManager = new OffsetManager(); - Instrumentation instrumentation = Mockito.mock(Instrumentation.class); + FirehoseInstrumentation firehoseInstrumentation = Mockito.mock(FirehoseInstrumentation.class); FirehoseKafkaConsumer consumer = Mockito.mock(FirehoseKafkaConsumer.class); KafkaConsumerConfig config = ConfigFactory.create(KafkaConsumerConfig.class, new HashMap<>()); @@ -92,7 +92,7 @@ public void shouldCommitToKafkaWithSinkManagesOwnOffsets() { offsetManager.setCommittable("test"); return null; }).when(s3).calculateCommittableOffsets(); - ConsumerAndOffsetManager consumerAndOffsetManager = new ConsumerAndOffsetManager(sinks, offsetManager, consumer, config, instrumentation); + ConsumerAndOffsetManager consumerAndOffsetManager = new ConsumerAndOffsetManager(sinks, offsetManager, consumer, config, firehoseInstrumentation); 
consumerAndOffsetManager.addOffsetsAndSetCommittable(consumerAndOffsetManager.readMessages()); consumerAndOffsetManager.commit(); Mockito.verify(consumer, Mockito.times(1)).commit(new HashMap() {{ diff --git a/src/test/java/io/odpf/firehose/consumer/FirehoseAsyncConsumerTest.java b/src/test/java/io/odpf/firehose/consumer/FirehoseAsyncConsumerTest.java index 0f4ace82b..c2e00e998 100644 --- a/src/test/java/io/odpf/firehose/consumer/FirehoseAsyncConsumerTest.java +++ b/src/test/java/io/odpf/firehose/consumer/FirehoseAsyncConsumerTest.java @@ -2,12 +2,12 @@ import io.odpf.firehose.consumer.kafka.ConsumerAndOffsetManager; import io.odpf.firehose.message.Message; +import io.odpf.firehose.metrics.FirehoseInstrumentation; import io.odpf.firehose.sink.SinkPool; import io.odpf.firehose.exception.SinkTaskFailedException; import io.odpf.firehose.filter.FilterException; import io.odpf.firehose.filter.FilteredMessages; import io.odpf.firehose.filter.NoOpFilter; -import io.odpf.firehose.metrics.Instrumentation; import io.odpf.firehose.metrics.Metrics; import io.odpf.firehose.tracer.SinkTracer; import org.junit.Before; @@ -33,7 +33,7 @@ public class FirehoseAsyncConsumerTest { @Mock private SinkTracer tracer; @Mock - private Instrumentation instrumentation; + private FirehoseInstrumentation firehoseInstrumentation; @Mock private Future> future1; @Mock @@ -45,8 +45,8 @@ public class FirehoseAsyncConsumerTest { @Before public void setUp() { MockitoAnnotations.initMocks(this); - FirehoseFilter firehoseFilter = new FirehoseFilter(new NoOpFilter(instrumentation), instrumentation); - this.asyncConsumer = new FirehoseAsyncConsumer(sinkPool, tracer, consumerAndOffsetManager, firehoseFilter, instrumentation); + FirehoseFilter firehoseFilter = new FirehoseFilter(new NoOpFilter(firehoseInstrumentation), firehoseInstrumentation); + this.asyncConsumer = new FirehoseAsyncConsumer(sinkPool, tracer, consumerAndOffsetManager, firehoseFilter, firehoseInstrumentation); } @Test @@ -114,7 +114,7 @@ public void shouldThrowExceptionIfSinkTaskFails() { @Test public void shouldAddOffsetsForFilteredMessages() throws Exception { FirehoseFilter firehoseFilter = Mockito.mock(FirehoseFilter.class); - this.asyncConsumer = new FirehoseAsyncConsumer(sinkPool, tracer, consumerAndOffsetManager, firehoseFilter, instrumentation); + this.asyncConsumer = new FirehoseAsyncConsumer(sinkPool, tracer, consumerAndOffsetManager, firehoseFilter, firehoseInstrumentation); List messages = new ArrayList() {{ add(new Message(new byte[0], new byte[0], "topic1", 1, 10)); @@ -145,6 +145,6 @@ public void shouldAddOffsetsForFilteredMessages() throws Exception { }}); Mockito.verify(consumerAndOffsetManager, Mockito.times(1)).setCommittable(future1); Mockito.verify(consumerAndOffsetManager, Mockito.times(1)).commit(); - Mockito.verify(instrumentation, Mockito.times(1)).captureDurationSince(Mockito.eq(Metrics.SOURCE_KAFKA_PARTITIONS_PROCESS_TIME_MILLISECONDS), Mockito.any(Instant.class)); + Mockito.verify(firehoseInstrumentation, Mockito.times(1)).captureDurationSince(Mockito.eq(Metrics.SOURCE_KAFKA_PARTITIONS_PROCESS_TIME_MILLISECONDS), Mockito.any(Instant.class)); } } diff --git a/src/test/java/io/odpf/firehose/consumer/FirehoseFilterTest.java b/src/test/java/io/odpf/firehose/consumer/FirehoseFilterTest.java index 3fc8f096a..fb5a43120 100644 --- a/src/test/java/io/odpf/firehose/consumer/FirehoseFilterTest.java +++ b/src/test/java/io/odpf/firehose/consumer/FirehoseFilterTest.java @@ -4,7 +4,7 @@ import io.odpf.firehose.filter.Filter; import 
io.odpf.firehose.filter.FilterException; import io.odpf.firehose.filter.FilteredMessages; -import io.odpf.firehose.metrics.Instrumentation; +import io.odpf.firehose.metrics.FirehoseInstrumentation; import org.junit.Assert; import org.junit.Test; import org.mockito.Mockito; @@ -34,9 +34,9 @@ public void shouldReturnAllMessages() throws FilterException { add(message3); }}; Filter filter = Mockito.mock(Filter.class); - Instrumentation instrumentation = Mockito.mock(Instrumentation.class); + FirehoseInstrumentation firehoseInstrumentation = Mockito.mock(FirehoseInstrumentation.class); - FirehoseFilter firehoseFilter = new FirehoseFilter(filter, instrumentation); + FirehoseFilter firehoseFilter = new FirehoseFilter(filter, firehoseInstrumentation); Mockito.when(filter.filter(messages)).thenReturn(new FilteredMessages() {{ addToValidMessages(message1); addToValidMessages(message2); @@ -47,7 +47,7 @@ public void shouldReturnAllMessages() throws FilterException { Assert.assertEquals(messages, actualFilteredMessage.getValidMessages()); Assert.assertEquals(new ArrayList<>(), actualFilteredMessage.getInvalidMessages()); Mockito.verify(filter, Mockito.times(1)).filter(messages); - Mockito.verify(instrumentation, Mockito.times(0)).captureFilteredMessageCount(Mockito.anyInt()); + Mockito.verify(firehoseInstrumentation, Mockito.times(0)).captureFilteredMessageCount(Mockito.anyInt()); } @@ -72,8 +72,8 @@ public void shouldPartitiondMessages() throws FilterException { }}; Filter filter = Mockito.mock(Filter.class); - Instrumentation instrumentation = Mockito.mock(Instrumentation.class); - FirehoseFilter firehoseFilter = new FirehoseFilter(filter, instrumentation); + FirehoseInstrumentation firehoseInstrumentation = Mockito.mock(FirehoseInstrumentation.class); + FirehoseFilter firehoseFilter = new FirehoseFilter(filter, firehoseInstrumentation); Mockito.when(filter.filter(messages)).thenReturn(new FilteredMessages() {{ addToValidMessages(message1); addToInvalidMessages(message2); @@ -89,6 +89,6 @@ public void shouldPartitiondMessages() throws FilterException { add(message3); }}, actualFilteredMessage.getInvalidMessages()); Mockito.verify(filter, Mockito.times(1)).filter(messages); - Mockito.verify(instrumentation, Mockito.times(1)).captureFilteredMessageCount(2); + Mockito.verify(firehoseInstrumentation, Mockito.times(1)).captureFilteredMessageCount(2); } } diff --git a/src/test/java/io/odpf/firehose/consumer/FirehoseSyncConsumerTest.java b/src/test/java/io/odpf/firehose/consumer/FirehoseSyncConsumerTest.java index 46753e08d..58dc136b0 100644 --- a/src/test/java/io/odpf/firehose/consumer/FirehoseSyncConsumerTest.java +++ b/src/test/java/io/odpf/firehose/consumer/FirehoseSyncConsumerTest.java @@ -7,7 +7,7 @@ import io.odpf.firehose.message.Message; import io.odpf.firehose.filter.FilteredMessages; import io.odpf.firehose.filter.NoOpFilter; -import io.odpf.firehose.metrics.Instrumentation; +import io.odpf.firehose.metrics.FirehoseInstrumentation; import io.odpf.firehose.metrics.Metrics; import io.odpf.firehose.sink.Sink; import io.odpf.firehose.tracer.SinkTracer; @@ -17,7 +17,7 @@ import org.junit.runner.RunWith; import org.mockito.Mock; import org.mockito.Mockito; -import org.mockito.runners.MockitoJUnitRunner; +import org.mockito.junit.MockitoJUnitRunner; import java.io.IOException; import java.time.Instant; @@ -26,7 +26,6 @@ import java.util.Collections; import java.util.List; -import static org.mockito.ArgumentMatchers.anyList; import static org.mockito.Mockito.*; @RunWith(MockitoJUnitRunner.class) @@ 
-36,7 +35,7 @@ public class FirehoseSyncConsumerTest { @Mock private Sink sink; @Mock - private Instrumentation instrumentation; + private FirehoseInstrumentation firehoseInstrumentation; @Mock private SinkTracer tracer; private FirehoseSyncConsumer firehoseSyncConsumer; @@ -49,9 +48,9 @@ public void setUp() throws Exception { messages = Arrays.asList(msg1, msg2); OffsetManager offsetManger = new OffsetManager(); KafkaConsumerConfig kafkaConsumerConfig = ConfigFactory.create(KafkaConsumerConfig.class, System.getenv()); - ConsumerAndOffsetManager consumerAndOffsetManager = new ConsumerAndOffsetManager(Collections.singletonList(sink), offsetManger, firehoseKafkaConsumer, kafkaConsumerConfig, instrumentation); - FirehoseFilter firehoseFilter = new FirehoseFilter(new NoOpFilter(instrumentation), instrumentation); - firehoseSyncConsumer = new FirehoseSyncConsumer(sink, tracer, consumerAndOffsetManager, firehoseFilter, instrumentation); + ConsumerAndOffsetManager consumerAndOffsetManager = new ConsumerAndOffsetManager(Collections.singletonList(sink), offsetManger, firehoseKafkaConsumer, kafkaConsumerConfig, firehoseInstrumentation); + FirehoseFilter firehoseFilter = new FirehoseFilter(new NoOpFilter(firehoseInstrumentation), firehoseInstrumentation); + firehoseSyncConsumer = new FirehoseSyncConsumer(sink, tracer, consumerAndOffsetManager, firehoseFilter, firehoseInstrumentation); when(firehoseKafkaConsumer.readMessages()).thenReturn(messages); } @@ -65,13 +64,13 @@ public void shouldProcessPartitions() throws IOException { public void shouldProcessEmptyPartitions() throws IOException { when(firehoseKafkaConsumer.readMessages()).thenReturn(new ArrayList<>()); firehoseSyncConsumer.process(); - verify(sink, times(0)).pushMessage(anyList()); + verify(sink, times(0)).pushMessage(new ArrayList<>()); } @Test public void shouldSendNoOfMessagesReceivedCount() throws IOException { firehoseSyncConsumer.process(); - verify(instrumentation).logInfo("Processed {} records in consumer", 2); + verify(firehoseInstrumentation).logInfo("Processed {} records in consumer", 2); } @Test @@ -85,18 +84,18 @@ public void shouldCallTracerWithTheSpan() throws IOException { @Test public void shouldCloseConsumerIfConsumerIsNotNull() throws IOException { firehoseSyncConsumer.close(); - verify(instrumentation, times(1)).logInfo("closing consumer"); + verify(firehoseInstrumentation, times(1)).logInfo("closing consumer"); verify(tracer, times(1)).close(); verify(firehoseKafkaConsumer, times(1)).close(); verify(sink, times(1)).close(); - verify(instrumentation, times(1)).close(); + verify(firehoseInstrumentation, times(1)).close(); } @Test public void shouldAddOffsetsForInvalidMessages() throws Exception { FirehoseFilter firehoseFilter = Mockito.mock(FirehoseFilter.class); ConsumerAndOffsetManager consumerAndOffsetManager = Mockito.mock(ConsumerAndOffsetManager.class); - firehoseSyncConsumer = new FirehoseSyncConsumer(sink, tracer, consumerAndOffsetManager, firehoseFilter, instrumentation); + firehoseSyncConsumer = new FirehoseSyncConsumer(sink, tracer, consumerAndOffsetManager, firehoseFilter, firehoseInstrumentation); Message msg1 = new Message(new byte[]{}, new byte[]{}, "topic", 0, 100); Message msg2 = new Message(new byte[]{}, new byte[]{}, "topic", 0, 100); Message msg3 = new Message(new byte[]{}, new byte[]{}, "topic", 0, 100); @@ -123,23 +122,23 @@ public void shouldAddOffsetsForInvalidMessages() throws Exception { }}); Mockito.verify(consumerAndOffsetManager, times(1)).commit(); - verify(instrumentation, 
times(1)).logInfo("Processed {} records in consumer", 3); + verify(firehoseInstrumentation, times(1)).logInfo("Processed {} records in consumer", 3); verify(tracer, times(1)).startTrace(messages); verify(tracer, times(1)).finishTrace(new ArrayList<>()); - verify(instrumentation, times(1)).captureDurationSince(eq(Metrics.SOURCE_KAFKA_PARTITIONS_PROCESS_TIME_MILLISECONDS), any(Instant.class)); + verify(firehoseInstrumentation, times(1)).captureDurationSince(eq(Metrics.SOURCE_KAFKA_PARTITIONS_PROCESS_TIME_MILLISECONDS), any(Instant.class)); } @Test public void shouldNotCloseConsumerIfConsumerIsNull() throws IOException { KafkaConsumerConfig kafkaConsumerConfig = ConfigFactory.create(KafkaConsumerConfig.class, System.getenv()); - ConsumerAndOffsetManager consumerAndOffsetManager = new ConsumerAndOffsetManager(Collections.singletonList(sink), new OffsetManager(), null, kafkaConsumerConfig, instrumentation); - FirehoseFilter firehoseFilter = new FirehoseFilter(new NoOpFilter(instrumentation), instrumentation); - firehoseSyncConsumer = new FirehoseSyncConsumer(sink, tracer, consumerAndOffsetManager, firehoseFilter, instrumentation); + ConsumerAndOffsetManager consumerAndOffsetManager = new ConsumerAndOffsetManager(Collections.singletonList(sink), new OffsetManager(), null, kafkaConsumerConfig, firehoseInstrumentation); + FirehoseFilter firehoseFilter = new FirehoseFilter(new NoOpFilter(firehoseInstrumentation), firehoseInstrumentation); + firehoseSyncConsumer = new FirehoseSyncConsumer(sink, tracer, consumerAndOffsetManager, firehoseFilter, firehoseInstrumentation); firehoseSyncConsumer.close(); - verify(instrumentation, times(0)).logInfo("closing consumer"); + verify(firehoseInstrumentation, times(0)).logInfo("closing consumer"); verify(tracer, times(1)).close(); verify(firehoseKafkaConsumer, times(0)).close(); verify(sink, times(1)).close(); - verify(instrumentation, times(1)).close(); + verify(firehoseInstrumentation, times(1)).close(); } } diff --git a/src/test/java/io/odpf/firehose/consumer/kafka/FirehoseKafkaConsumerTest.java b/src/test/java/io/odpf/firehose/consumer/kafka/FirehoseKafkaConsumerTest.java index 20e18c732..d432abdc6 100644 --- a/src/test/java/io/odpf/firehose/consumer/kafka/FirehoseKafkaConsumerTest.java +++ b/src/test/java/io/odpf/firehose/consumer/kafka/FirehoseKafkaConsumerTest.java @@ -4,7 +4,7 @@ import io.odpf.firehose.consumer.TestKey; import io.odpf.firehose.consumer.TestMessage; import io.odpf.firehose.message.Message; -import io.odpf.firehose.metrics.Instrumentation; +import io.odpf.firehose.metrics.FirehoseInstrumentation; import io.odpf.firehose.metrics.Metrics; import org.apache.kafka.clients.consumer.ConsumerRecord; import org.apache.kafka.clients.consumer.ConsumerRecords; @@ -20,7 +20,7 @@ import org.mockito.Mock; import org.mockito.Mockito; import org.mockito.MockitoAnnotations; -import org.mockito.runners.MockitoJUnitRunner; +import org.mockito.junit.MockitoJUnitRunner; import java.time.Duration; import java.util.Arrays; @@ -30,18 +30,12 @@ import static org.junit.Assert.assertEquals; import static org.junit.Assert.assertNotNull; import static org.junit.Assert.fail; -import static org.mockito.ArgumentMatchers.any; -import static org.mockito.Mockito.doThrow; -import static org.mockito.Mockito.times; -import static org.mockito.Mockito.verify; -import static org.mockito.Mockito.when; +import static org.mockito.Mockito.*; import java.util.HashMap; import java.util.Iterator; import java.util.Map; -import static org.mockito.ArgumentMatchers.eq; - 
@RunWith(MockitoJUnitRunner.class) public class FirehoseKafkaConsumerTest { @Mock @@ -49,7 +43,7 @@ public class FirehoseKafkaConsumerTest { @Mock private ConsumerRecords consumerRecords; @Mock - private Instrumentation instrumentation; + private FirehoseInstrumentation firehoseInstrumentation; @Mock private KafkaConsumerConfig consumerConfig; private TestMessage message; @@ -62,7 +56,7 @@ public void setUp() { MockitoAnnotations.initMocks(this); message = TestMessage.newBuilder().setOrderNumber("123").setOrderUrl("abc").setOrderDetails("details").build(); key = TestKey.newBuilder().setOrderNumber("123").setOrderUrl("abc").build(); - firehoseKafkaConsumer = new FirehoseKafkaConsumer(kafkaConsumer, consumerConfig, instrumentation); + firehoseKafkaConsumer = new FirehoseKafkaConsumer(kafkaConsumer, consumerConfig, firehoseInstrumentation); when(consumerConfig.getSourceKafkaPollTimeoutMs()).thenReturn(500L); when(consumerConfig.getSourceKafkaConsumerGroupId()).thenReturn(consumerGroupId); when(kafkaConsumer.poll(Duration.ofMillis(500L))).thenReturn(consumerRecords); @@ -116,11 +110,11 @@ public void shouldrecordStatsFromEsbLog() { firehoseKafkaConsumer.readMessages(); - verify(instrumentation, times(1)).logInfo("Pulled {} messages", 2); - verify(instrumentation, times(1)).capturePulledMessageHistogram(2); - verify(instrumentation, times(1)).logDebug("Pulled record: {}", record1); - verify(instrumentation, times(1)).logDebug("Pulled record: {}", record2); - verify(instrumentation, times(1)).captureGlobalMessageMetrics(Metrics.MessageScope.CONSUMER, 2); + verify(firehoseInstrumentation, times(1)).logInfo("Pulled {} messages", 2); + verify(firehoseInstrumentation, times(1)).capturePulledMessageHistogram(2); + verify(firehoseInstrumentation, times(1)).logDebug("Pulled record: {}", record1); + verify(firehoseInstrumentation, times(1)).logDebug("Pulled record: {}", record2); + verify(firehoseInstrumentation, times(1)).captureGlobalMessageMetrics(Metrics.MessageScope.CONSUMER, 2); } @Test @@ -151,7 +145,7 @@ public void shouldCallCloseOnConsumer() { firehoseKafkaConsumer.close(); verify(kafkaConsumer).close(); - verify(instrumentation).logInfo("Consumer is closing"); + verify(firehoseInstrumentation).logInfo("Consumer is closing"); } @Test @@ -160,7 +154,7 @@ public void shouldSuppressExceptionOnClose() { try { firehoseKafkaConsumer.close(); - verify(instrumentation, times(1)).logInfo("Consumer is closing"); + verify(firehoseInstrumentation, times(1)).logInfo("Consumer is closing"); } catch (Exception kafkaConsumerException) { fail("Failed to supress exception on close"); } @@ -170,7 +164,7 @@ public void shouldSuppressExceptionOnClose() { public void shouldCaptureNonFatalError() { doThrow(new RuntimeException()).when(kafkaConsumer).close(); firehoseKafkaConsumer.close(); - verify(instrumentation, times(1)).captureNonFatalError(any(), eq("Exception while closing consumer")); + verify(firehoseInstrumentation, times(1)).captureNonFatalError(any(), any(), eq("Exception while closing consumer")); } @Test diff --git a/src/test/java/io/odpf/firehose/error/ErrorHandlerTest.java b/src/test/java/io/odpf/firehose/error/ErrorHandlerTest.java index 802f4fa62..8cdb1ceb6 100644 --- a/src/test/java/io/odpf/firehose/error/ErrorHandlerTest.java +++ b/src/test/java/io/odpf/firehose/error/ErrorHandlerTest.java @@ -1,5 +1,7 @@ package io.odpf.firehose.error; +import io.odpf.depot.error.ErrorInfo; +import io.odpf.depot.error.ErrorType; import io.odpf.firehose.config.ErrorConfig; import io.odpf.firehose.message.Message; 
import org.aeonbits.owner.ConfigFactory; diff --git a/src/test/java/io/odpf/firehose/filter/NoOpFilterTest.java b/src/test/java/io/odpf/firehose/filter/NoOpFilterTest.java index e9088c315..885595dca 100644 --- a/src/test/java/io/odpf/firehose/filter/NoOpFilterTest.java +++ b/src/test/java/io/odpf/firehose/filter/NoOpFilterTest.java @@ -3,7 +3,7 @@ import io.odpf.firehose.message.Message; import io.odpf.firehose.consumer.TestKey; import io.odpf.firehose.consumer.TestMessage; -import io.odpf.firehose.metrics.Instrumentation; +import io.odpf.firehose.metrics.FirehoseInstrumentation; import org.junit.Before; import org.junit.Test; import org.mockito.Mock; @@ -18,7 +18,7 @@ public class NoOpFilterTest { @Mock - private Instrumentation instrumentation; + private FirehoseInstrumentation firehoseInstrumentation; @Before public void setup() { @@ -27,8 +27,8 @@ public void setup() { @Test public void shouldLogNoFilterSelected() { - new NoOpFilter(instrumentation); - verify(instrumentation, times(1)).logInfo("No filter is selected"); + new NoOpFilter(firehoseInstrumentation); + verify(firehoseInstrumentation, times(1)).logInfo("No filter is selected"); } @Test @@ -39,7 +39,7 @@ public void shouldReturnInputListOfMessagesForProtobufMessageType() throws Filte TestMessage testMessageProto2 = TestMessage.newBuilder().setOrderNumber("92").setOrderUrl("pqr").setOrderDetails("details").build(); Message message1 = new Message(testKeyProto1.toByteArray(), testMessageProto1.toByteArray(), "topic1", 0, 100); Message message2 = new Message(testKeyProto2.toByteArray(), testMessageProto2.toByteArray(), "topic1", 0, 101); - NoOpFilter noOpFilter = new NoOpFilter(instrumentation); + NoOpFilter noOpFilter = new NoOpFilter(firehoseInstrumentation); FilteredMessages expectedMessages = new FilteredMessages(); expectedMessages.addToValidMessages(message1); expectedMessages.addToValidMessages(message2); @@ -58,7 +58,7 @@ public void shouldReturnInputListOfMessagesForJsonMessageType() throws FilterExc FilteredMessages expectedMessages = new FilteredMessages(); expectedMessages.addToValidMessages(message1); expectedMessages.addToValidMessages(message2); - NoOpFilter noOpFilter = new NoOpFilter(instrumentation); + NoOpFilter noOpFilter = new NoOpFilter(firehoseInstrumentation); FilteredMessages filteredMessages = noOpFilter.filter(Arrays.asList(message1, message2)); assertEquals(expectedMessages, filteredMessages); } diff --git a/src/test/java/io/odpf/firehose/filter/jexl/JexlFilterTest.java b/src/test/java/io/odpf/firehose/filter/jexl/JexlFilterTest.java index 424b922a0..2818d56c2 100644 --- a/src/test/java/io/odpf/firehose/filter/jexl/JexlFilterTest.java +++ b/src/test/java/io/odpf/firehose/filter/jexl/JexlFilterTest.java @@ -10,7 +10,7 @@ import io.odpf.firehose.filter.Filter; import io.odpf.firehose.filter.FilterException; import io.odpf.firehose.filter.FilteredMessages; -import io.odpf.firehose.metrics.Instrumentation; +import io.odpf.firehose.metrics.FirehoseInstrumentation; import org.aeonbits.owner.ConfigFactory; import org.junit.Before; import org.junit.Test; @@ -31,7 +31,7 @@ public class JexlFilterTest { private TestKey key; @Mock - private Instrumentation instrumentation; + private FirehoseInstrumentation firehoseInstrumentation; @Before public void setup() { @@ -49,7 +49,7 @@ public void setup() { @Test public void shouldFilterEsbMessages() throws FilterException { Message message = new Message(key.toByteArray(), this.testMessage.toByteArray(), "topic1", 0, 100); - filter = new JexlFilter(kafkaConsumerConfig, 
instrumentation); + filter = new JexlFilter(kafkaConsumerConfig, firehoseInstrumentation); FilteredMessages filteredMessages = filter.filter(Arrays.asList(message)); FilteredMessages expectedMessages = new FilteredMessages(); expectedMessages.addToValidMessages(message); @@ -66,7 +66,7 @@ public void shouldNotFilterEsbMessagesForEmptyBooleanValues() throws FilterExcep bookingFilterConfigs.put("FILTER_JEXL_EXPRESSION", "testBookingLogMessage.getCustomerDynamicSurgeEnabled() == false"); bookingFilterConfigs.put("FILTER_SCHEMA_PROTO_CLASS", TestBookingLogMessage.class.getName()); FilterConfig bookingConsumerConfig = ConfigFactory.create(FilterConfig.class, bookingFilterConfigs); - JexlFilter bookingFilter = new JexlFilter(bookingConsumerConfig, instrumentation); + JexlFilter bookingFilter = new JexlFilter(bookingConsumerConfig, firehoseInstrumentation); FilteredMessages expectedMessages = new FilteredMessages(); expectedMessages.addToValidMessages(message); FilteredMessages filteredMessages = bookingFilter.filter(Arrays.asList(message)); @@ -81,7 +81,7 @@ public void shouldThrowExceptionOnInvalidFilterExpression() throws FilterExcepti filterConfigs.put("FILTER_SCHEMA_PROTO_CLASS", TestMessage.class.getName()); kafkaConsumerConfig = ConfigFactory.create(FilterConfig.class, filterConfigs); - filter = new JexlFilter(kafkaConsumerConfig, instrumentation); + filter = new JexlFilter(kafkaConsumerConfig, firehoseInstrumentation); key = TestKey.newBuilder().setOrderNumber("123").setOrderUrl("abc").build(); this.testMessage = TestMessage.newBuilder().setOrderNumber("123").setOrderUrl("abc").setOrderDetails("details").build(); @@ -97,9 +97,9 @@ public void shouldLogFilterTypeIfFilterTypeIsNotNone() { filterConfigs.put("FILTER_SCHEMA_PROTO_CLASS", TestMessage.class.getName()); kafkaConsumerConfig = ConfigFactory.create(FilterConfig.class, filterConfigs); - new JexlFilter(kafkaConsumerConfig, instrumentation); - Mockito.verify(instrumentation, Mockito.times(1)).logInfo("\n\tFilter type: {}", FilterDataSourceType.MESSAGE); - Mockito.verify(instrumentation, Mockito.times(1)).logInfo("\n\tFilter schema: {}", TestMessage.class.getName()); - Mockito.verify(instrumentation, Mockito.times(1)).logInfo("\n\tFilter expression: {}", "testMessage.getOrderNumber() == 123"); + new JexlFilter(kafkaConsumerConfig, firehoseInstrumentation); + Mockito.verify(firehoseInstrumentation, Mockito.times(1)).logInfo("\n\tFilter type: {}", FilterDataSourceType.MESSAGE); + Mockito.verify(firehoseInstrumentation, Mockito.times(1)).logInfo("\n\tFilter schema: {}", TestMessage.class.getName()); + Mockito.verify(firehoseInstrumentation, Mockito.times(1)).logInfo("\n\tFilter expression: {}", "testMessage.getOrderNumber() == 123"); } } diff --git a/src/test/java/io/odpf/firehose/filter/json/JsonFilterTest.java b/src/test/java/io/odpf/firehose/filter/json/JsonFilterTest.java index f72972021..9346369de 100644 --- a/src/test/java/io/odpf/firehose/filter/json/JsonFilterTest.java +++ b/src/test/java/io/odpf/firehose/filter/json/JsonFilterTest.java @@ -9,9 +9,9 @@ import io.odpf.firehose.consumer.TestMessage; import io.odpf.firehose.filter.FilterException; import io.odpf.firehose.filter.FilteredMessages; -import io.odpf.firehose.metrics.Instrumentation; +import io.odpf.firehose.metrics.FirehoseInstrumentation; +import io.odpf.stencil.StencilClientFactory; import io.odpf.stencil.client.StencilClient; -import io.odpf.stencil.Parser; import org.aeonbits.owner.ConfigFactory; import org.junit.Before; import org.junit.Rule; @@ -19,7 +19,6 @@ import 
org.junit.rules.ExpectedException; import org.mockito.Mock; import org.mockito.Mockito; -import org.mockito.MockitoAnnotations; import java.util.Arrays; import java.util.Collections; @@ -30,9 +29,7 @@ import com.google.protobuf.InvalidProtocolBufferException; import static org.junit.Assert.assertEquals; -import static org.mockito.Mockito.times; -import static org.mockito.Mockito.verify; -import static org.mockito.Mockito.when; +import static org.mockito.Mockito.*; public class JsonFilterTest { @@ -47,15 +44,13 @@ public class JsonFilterTest { public ExpectedException thrown = ExpectedException.none(); @Mock - private Instrumentation instrumentation; - @Mock + private FirehoseInstrumentation firehoseInstrumentation; private StencilClient stencilClient; - @Mock - private Parser parser; @Before public void setup() throws InvalidProtocolBufferException { - MockitoAnnotations.initMocks(this); + stencilClient = StencilClientFactory.getClient(); + firehoseInstrumentation = Mockito.mock(FirehoseInstrumentation.class); testKeyProto1 = TestKey.newBuilder().setOrderNumber("123").setOrderUrl("abc").build(); testMessageProto1 = TestMessage.newBuilder().setOrderNumber("123").setOrderUrl("abc").setOrderDetails("details").build(); testKeyJson1 = "{\"order_number\":\"123\",\"order_url\":\"abc\"}"; @@ -64,8 +59,6 @@ public void setup() throws InvalidProtocolBufferException { testMessageProto2 = TestMessage.newBuilder().setOrderNumber("92").setOrderUrl("pqr").setOrderDetails("details").build(); testKeyJson2 = "{\"order_number\":\"92\",\"order_url\":\"pqr\"}"; testMessageJson2 = "{\"order_number\":\"92\",\"order_url\":\"pqr\",\"order_details\":\"details\"}"; - when(stencilClient.parse(Mockito.anyString(), Mockito.any())).thenCallRealMethod(); - when(stencilClient.getParser(Mockito.anyString())).thenCallRealMethod(); } @Test @@ -78,8 +71,7 @@ public void shouldFilterEsbMessagesForProtobufMessageType() throws FilterExcepti filterConfigs.put("FILTER_JSON_SCHEMA", "{\"properties\":{\"order_number\":{\"const\":\"123\"}}}"); filterConfigs.put("FILTER_SCHEMA_PROTO_CLASS", TestMessage.class.getName()); filterConfig = ConfigFactory.create(FilterConfig.class, filterConfigs); - when(stencilClient.get(TestMessage.class.getName())).thenReturn(TestMessage.getDescriptor()); - jsonFilter = new JsonFilter(stencilClient, filterConfig, instrumentation); + jsonFilter = new JsonFilter(stencilClient, filterConfig, firehoseInstrumentation); FilteredMessages filteredMessages = jsonFilter.filter(Arrays.asList(message1, message2)); FilteredMessages expectedMessages = new FilteredMessages(); expectedMessages.addToValidMessages(message1); @@ -108,8 +100,7 @@ public void shouldFilterMessagesWithNestedFieldsForProtobufMessageType() throws filterConfigs.put("FILTER_JSON_SCHEMA", "{\"properties\":{\"driver_pickup_location\":{\"properties\":{\"latitude\":{\"minimum\":88}}}}}"); filterConfigs.put("FILTER_SCHEMA_PROTO_CLASS", TestBookingLogMessage.class.getName()); filterConfig = ConfigFactory.create(FilterConfig.class, filterConfigs); - when(stencilClient.get(TestBookingLogMessage.class.getName())).thenReturn(TestBookingLogMessage.getDescriptor()); - jsonFilter = new JsonFilter(stencilClient, filterConfig, instrumentation); + jsonFilter = new JsonFilter(stencilClient, filterConfig, firehoseInstrumentation); FilteredMessages filteredMessages = jsonFilter.filter(Arrays.asList(message1, message2)); FilteredMessages expectedMessages = new FilteredMessages(); expectedMessages.addToValidMessages(message2); @@ -128,8 +119,7 @@ public void 
shouldFilterEsbMessagesForJsonMessageType() throws FilterException { filterConfigs.put("FILTER_DATA_SOURCE", "message"); filterConfigs.put("FILTER_JSON_SCHEMA", "{\"properties\":{\"order_number\":{\"const\":\"123\"}}}"); filterConfig = ConfigFactory.create(FilterConfig.class, filterConfigs); - when(stencilClient.get(TestMessage.class.getName())).thenReturn(TestMessage.getDescriptor()); - jsonFilter = new JsonFilter(stencilClient, filterConfig, instrumentation); + jsonFilter = new JsonFilter(stencilClient, filterConfig, firehoseInstrumentation); FilteredMessages expectedMessages = new FilteredMessages(); expectedMessages.addToValidMessages(message1); expectedMessages.addToInvalidMessages(message2); @@ -149,8 +139,7 @@ public void shouldNotFilterProtobufMessagesWhenEmptyJSONSchema() throws FilterEx filterConfigs.put("FILTER_JSON_SCHEMA", ""); filterConfigs.put("FILTER_SCHEMA_PROTO_CLASS", TestMessage.class.getName()); filterConfig = ConfigFactory.create(FilterConfig.class, filterConfigs); - when(stencilClient.get(TestMessage.class.getName())).thenReturn(TestMessage.getDescriptor()); - jsonFilter = new JsonFilter(stencilClient, filterConfig, instrumentation); + jsonFilter = new JsonFilter(stencilClient, filterConfig, firehoseInstrumentation); FilteredMessages expectedMessages = new FilteredMessages(); expectedMessages.addToValidMessages(message1); expectedMessages.addToValidMessages(message2); @@ -168,8 +157,7 @@ public void shouldNotFilterJsonMessagesWhenEmptyJSONSchema() throws FilterExcept filterConfigs.put("FILTER_DATA_SOURCE", "message"); filterConfigs.put("FILTER_JSON_SCHEMA", ""); filterConfig = ConfigFactory.create(FilterConfig.class, filterConfigs); - when(stencilClient.get(TestMessage.class.getName())).thenReturn(TestMessage.getDescriptor()); - jsonFilter = new JsonFilter(stencilClient, filterConfig, instrumentation); + jsonFilter = new JsonFilter(stencilClient, filterConfig, firehoseInstrumentation); FilteredMessages expectedMessages = new FilteredMessages(); expectedMessages.addToValidMessages(message1); expectedMessages.addToValidMessages(message2); @@ -190,8 +178,7 @@ public void shouldNotFilterEsbMessagesForEmptyBooleanValuesForProtobufMessageTyp filterConfigs.put("FILTER_SCHEMA_PROTO_CLASS", TestBookingLogMessage.class.getName()); filterConfigs.put("FILTER_ESB_MESSAGE_FORMAT", "PROTOBUF"); FilterConfig bookingConsumerConfig = ConfigFactory.create(FilterConfig.class, filterConfigs); - when(stencilClient.get(TestBookingLogMessage.class.getName())).thenReturn(TestMessage.getDescriptor()); - JsonFilter bookingFilter = new JsonFilter(stencilClient, bookingConsumerConfig, instrumentation); + JsonFilter bookingFilter = new JsonFilter(stencilClient, bookingConsumerConfig, firehoseInstrumentation); FilteredMessages expectedMessages = new FilteredMessages(); expectedMessages.addToValidMessages(message); FilteredMessages filteredMessages = bookingFilter.filter(Collections.singletonList(message)); @@ -208,8 +195,7 @@ public void shouldThrowExceptionWhenJsonMessageInvalid() throws FilterException filterConfigs.put("FILTER_DATA_SOURCE", "key"); filterConfigs.put("FILTER_JSON_SCHEMA", "{\"properties\":{\"order_number\":{\"const\":\"123\"}}}"); filterConfig = ConfigFactory.create(FilterConfig.class, filterConfigs); - when(stencilClient.get(TestMessage.class.getName())).thenReturn(TestMessage.getDescriptor()); - jsonFilter = new JsonFilter(stencilClient, filterConfig, instrumentation); + jsonFilter = new JsonFilter(stencilClient, filterConfig, firehoseInstrumentation); 
thrown.expect(FilterException.class); thrown.expectMessage("Failed to parse JSON message"); jsonFilter.filter(Arrays.asList(message1, message2)); @@ -225,8 +211,7 @@ public void shouldThrowExceptionWhenProtobufMessageInvalid() throws FilterExcept filterConfigs.put("FILTER_JSON_SCHEMA", "{\"properties\":{\"order_number\":{\"const\":\"123\"}}}"); filterConfigs.put("FILTER_SCHEMA_PROTO_CLASS", TestMessage.class.getName()); filterConfig = ConfigFactory.create(FilterConfig.class, filterConfigs); - when(stencilClient.get(TestKey.class.getName())).thenReturn(TestKey.getDescriptor()); - jsonFilter = new JsonFilter(stencilClient, filterConfig, instrumentation); + jsonFilter = new JsonFilter(stencilClient, filterConfig, firehoseInstrumentation); thrown.expect(FilterException.class); thrown.expectMessage("Failed to parse Protobuf message"); jsonFilter.filter(Arrays.asList(message1, message2)); @@ -242,8 +227,7 @@ public void shouldThrowExceptionWhenProtoSchemaClassInvalid() throws FilterExcep filterConfigs.put("FILTER_JSON_SCHEMA", "{\"properties\":{\"order_number\":{\"const\":\"123\"}}}"); filterConfigs.put("FILTER_SCHEMA_PROTO_CLASS", "ss"); filterConfig = ConfigFactory.create(FilterConfig.class, filterConfigs); - when(stencilClient.get(TestKey.class.getName())).thenReturn(TestKey.getDescriptor()); - jsonFilter = new JsonFilter(stencilClient, filterConfig, instrumentation); + jsonFilter = new JsonFilter(stencilClient, filterConfig, firehoseInstrumentation); thrown.expect(FilterException.class); thrown.expectMessage("Failed to parse Protobuf message"); jsonFilter.filter(Arrays.asList(message1, message2)); @@ -259,10 +243,9 @@ public void shouldLogCauseToFilterOutMessageForProtobufMessageFormat() throws Fi filterConfigs.put("FILTER_JSON_SCHEMA", "{\"properties\":{\"order_number\":{\"const\":\"123\"}}}"); filterConfigs.put("FILTER_SCHEMA_PROTO_CLASS", TestMessage.class.getName()); filterConfig = ConfigFactory.create(FilterConfig.class, filterConfigs); - when(stencilClient.get(TestMessage.class.getName())).thenReturn(TestMessage.getDescriptor()); - jsonFilter = new JsonFilter(stencilClient, filterConfig, instrumentation); + jsonFilter = new JsonFilter(stencilClient, filterConfig, firehoseInstrumentation); jsonFilter.filter(Arrays.asList(message1, message2)); - verify(instrumentation, times(1)).logDebug("Message filtered out due to: {}", "$.order_number: must be a constant value 123"); + verify(firehoseInstrumentation, times(1)).logDebug("Message filtered out due to: {}", "$.order_number: must be a constant value 123"); } @Test @@ -274,9 +257,8 @@ public void shouldLogCauseToFilterOutMessageForJsonMessageFormat() throws Filter filterConfigs.put("FILTER_DATA_SOURCE", "message"); filterConfigs.put("FILTER_JSON_SCHEMA", "{\"properties\":{\"order_number\":{\"const\":\"123\"}}}"); filterConfig = ConfigFactory.create(FilterConfig.class, filterConfigs); - when(stencilClient.get(TestMessage.class.getName())).thenReturn(TestMessage.getDescriptor()); - jsonFilter = new JsonFilter(stencilClient, filterConfig, instrumentation); + jsonFilter = new JsonFilter(stencilClient, filterConfig, firehoseInstrumentation); jsonFilter.filter(Arrays.asList(message1, message2)); - verify(instrumentation, times(1)).logDebug("Message filtered out due to: {}", "$.order_number: must be a constant value 123"); + verify(firehoseInstrumentation, times(1)).logDebug("Message filtered out due to: {}", "$.order_number: must be a constant value 123"); } } diff --git a/src/test/java/io/odpf/firehose/filter/json/JsonFilterUtilTest.java 
b/src/test/java/io/odpf/firehose/filter/json/JsonFilterUtilTest.java index 0d61c346d..b5900de6e 100644 --- a/src/test/java/io/odpf/firehose/filter/json/JsonFilterUtilTest.java +++ b/src/test/java/io/odpf/firehose/filter/json/JsonFilterUtilTest.java @@ -3,7 +3,7 @@ import io.odpf.firehose.config.FilterConfig; import io.odpf.firehose.config.enums.FilterDataSourceType; import io.odpf.firehose.consumer.TestMessage; -import io.odpf.firehose.metrics.Instrumentation; +import io.odpf.firehose.metrics.FirehoseInstrumentation; import org.aeonbits.owner.ConfigFactory; import org.junit.Before; import org.junit.Rule; @@ -25,7 +25,7 @@ public class JsonFilterUtilTest { public ExpectedException thrown = ExpectedException.none(); @Mock - private Instrumentation instrumentation; + private FirehoseInstrumentation firehoseInstrumentation; private FilterConfig filterConfig; @@ -42,11 +42,11 @@ public void shouldLogFilterConfigsForValidConfiguration() { filterConfigs.put("FILTER_JSON_SCHEMA", "{\"properties\":{\"order_number\":{\"const\":\"123\"}}}"); filterConfigs.put("FILTER_SCHEMA_PROTO_CLASS", TestMessage.class.getName()); filterConfig = ConfigFactory.create(FilterConfig.class, filterConfigs); - JsonFilterUtil.logConfigs(filterConfig, instrumentation); - verify(instrumentation, times(1)).logInfo("\n\tFilter data source type: {}", FilterDataSourceType.MESSAGE); - verify(instrumentation, times(1)).logInfo("\n\tMessage Proto class: {}", TestMessage.class.getName()); - verify(instrumentation, times(1)).logInfo("\n\tFilter JSON Schema: {}", "{\"properties\":{\"order_number\":{\"const\":\"123\"}}}"); - verify(instrumentation, times(1)).logInfo("\n\tFilter ESB message format: {}", PROTOBUF); + JsonFilterUtil.logConfigs(filterConfig, firehoseInstrumentation); + verify(firehoseInstrumentation, times(1)).logInfo("\n\tFilter data source type: {}", FilterDataSourceType.MESSAGE); + verify(firehoseInstrumentation, times(1)).logInfo("\n\tMessage Proto class: {}", TestMessage.class.getName()); + verify(firehoseInstrumentation, times(1)).logInfo("\n\tFilter JSON Schema: {}", "{\"properties\":{\"order_number\":{\"const\":\"123\"}}}"); + verify(firehoseInstrumentation, times(1)).logInfo("\n\tFilter ESB message format: {}", PROTOBUF); } @Test @@ -56,10 +56,10 @@ public void shouldLogFilterConfigsForInvalidConfiguration() { filterConfigs.put("FILTER_JSON_SCHEMA", "{\"properties\":{\"order_number\":{\"const\":\"123\"}}}"); filterConfigs.put("FILTER_SCHEMA_PROTO_CLASS", TestMessage.class.getName()); filterConfig = ConfigFactory.create(FilterConfig.class, filterConfigs); - JsonFilterUtil.logConfigs(filterConfig, instrumentation); - verify(instrumentation, times(1)).logInfo("\n\tFilter data source type: {}", FilterDataSourceType.MESSAGE); - verify(instrumentation, times(1)).logInfo("\n\tFilter JSON Schema: {}", "{\"properties\":{\"order_number\":{\"const\":\"123\"}}}"); - verify(instrumentation, times(1)).logInfo("\n\tFilter ESB message format: {}", (Object) null); + JsonFilterUtil.logConfigs(filterConfig, firehoseInstrumentation); + verify(firehoseInstrumentation, times(1)).logInfo("\n\tFilter data source type: {}", FilterDataSourceType.MESSAGE); + verify(firehoseInstrumentation, times(1)).logInfo("\n\tFilter JSON Schema: {}", "{\"properties\":{\"order_number\":{\"const\":\"123\"}}}"); + verify(firehoseInstrumentation, times(1)).logInfo("\n\tFilter ESB message format: {}", (Object) null); } @Test @@ -70,8 +70,8 @@ public void shouldThrowIllegalArgumentExceptionForNullJsonSchema() { filterConfigs.put("FILTER_SCHEMA_PROTO_CLASS", 
TestMessage.class.getName()); filterConfig = ConfigFactory.create(FilterConfig.class, filterConfigs); thrown.expect(IllegalArgumentException.class); - JsonFilterUtil.validateConfigs(filterConfig, instrumentation); - verify(instrumentation, times(1)).logError("Failed to create filter due to invalid config"); + JsonFilterUtil.validateConfigs(filterConfig, firehoseInstrumentation); + verify(firehoseInstrumentation, times(1)).logError("Failed to create filter due to invalid config"); } @Test @@ -82,8 +82,8 @@ public void shouldThrowIllegalArgumentExceptionForNullMessageFormat() { filterConfigs.put("FILTER_SCHEMA_PROTO_CLASS", TestMessage.class.getName()); filterConfig = ConfigFactory.create(FilterConfig.class, filterConfigs); thrown.expect(IllegalArgumentException.class); - JsonFilterUtil.validateConfigs(filterConfig, instrumentation); - verify(instrumentation, times(1)).logError("Failed to create filter due to invalid config"); + JsonFilterUtil.validateConfigs(filterConfig, firehoseInstrumentation); + verify(firehoseInstrumentation, times(1)).logError("Failed to create filter due to invalid config"); } @Test @@ -94,8 +94,8 @@ public void shouldThrowExceptionForNullProtoSchemaClassForProtobufMessageFormat( filterConfigs.put("FILTER_JSON_SCHEMA", "{\"properties\":{\"order_number\":{\"const\":\"123\"}}}"); filterConfig = ConfigFactory.create(FilterConfig.class, filterConfigs); thrown.expect(IllegalArgumentException.class); - JsonFilterUtil.validateConfigs(filterConfig, instrumentation); - verify(instrumentation, times(1)).logError("Failed to create filter due to invalid config"); + JsonFilterUtil.validateConfigs(filterConfig, firehoseInstrumentation); + verify(firehoseInstrumentation, times(1)).logError("Failed to create filter due to invalid config"); } @Test @@ -106,6 +106,6 @@ public void shouldNotThrowIllegalArgumentExceptionForValidFilterConfig() { filterConfigs.put("FILTER_JSON_SCHEMA", "{\"properties\":{\"order_number\":{\"const\":\"123\"}}}"); filterConfigs.put("FILTER_SCHEMA_PROTO_CLASS", TestMessage.class.getName()); filterConfig = ConfigFactory.create(FilterConfig.class, filterConfigs); - JsonFilterUtil.validateConfigs(filterConfig, instrumentation); + JsonFilterUtil.validateConfigs(filterConfig, firehoseInstrumentation); } } diff --git a/src/test/java/io/odpf/firehose/launch/TaskTest.java b/src/test/java/io/odpf/firehose/launch/TaskTest.java index 0b7f95e1f..c20887788 100644 --- a/src/test/java/io/odpf/firehose/launch/TaskTest.java +++ b/src/test/java/io/odpf/firehose/launch/TaskTest.java @@ -1,11 +1,11 @@ package io.odpf.firehose.launch; -import io.odpf.firehose.metrics.Instrumentation; +import io.odpf.firehose.metrics.FirehoseInstrumentation; import org.junit.Ignore; import org.junit.Test; import org.junit.runner.RunWith; import org.mockito.Mock; -import org.mockito.runners.MockitoJUnitRunner; +import org.mockito.junit.MockitoJUnitRunner; import java.util.ArrayList; import java.util.List; @@ -21,12 +21,12 @@ public class TaskTest { private static final long SLEEP_SECONDS = 10L; @Mock - private Instrumentation instrumentation; + private FirehoseInstrumentation firehoseInstrumentation; @Test public void shouldExecuteTaskWithParallelism() throws InterruptedException { final List threadList = new ArrayList<>(); - Task task = new Task(PARALLELISM, THREAD_CLEANUP_DELAY_IN_MS, instrumentation, callback -> { + Task task = new Task(PARALLELISM, THREAD_CLEANUP_DELAY_IN_MS, firehoseInstrumentation, callback -> { threadList.add(Thread.currentThread().getId()); callback.run(); }); @@ -38,7 
+38,7 @@ public void shouldExecuteTaskWithParallelism() throws InterruptedException { @Test @Ignore public void shouldExecuteTaskUntilStopped() throws InterruptedException { final ConcurrentHashMap threadResults = new ConcurrentHashMap(); - Task task = new Task(PARALLELISM, THREAD_CLEANUP_DELAY_IN_MS, instrumentation, callback -> { + Task task = new Task(PARALLELISM, THREAD_CLEANUP_DELAY_IN_MS, firehoseInstrumentation, callback -> { try { while (!Thread.interrupted()) { threadResults.put(Thread.currentThread().getId(), "thread started"); diff --git a/src/test/java/io/odpf/firehose/message/FirehoseMessageUtilsTest.java b/src/test/java/io/odpf/firehose/message/FirehoseMessageUtilsTest.java new file mode 100644 index 000000000..ddad28121 --- /dev/null +++ b/src/test/java/io/odpf/firehose/message/FirehoseMessageUtilsTest.java @@ -0,0 +1,73 @@ +package io.odpf.firehose.message; + + +import io.odpf.depot.common.Tuple; +import io.odpf.depot.message.OdpfMessage; +import org.junit.Assert; +import org.junit.Test; + +import java.util.ArrayList; +import java.util.Arrays; +import java.util.Collections; +import java.util.List; + +public class FirehoseMessageUtilsTest { + + @Test + public void shouldConvertToOdpfMessage() { + Message m1 = new Message( + "testKey".getBytes(), + "testMessage".getBytes(), + "topic", + 12, + 1 + ); + Message m2 = new Message( + "testKey1".getBytes(), + "testMessage1".getBytes(), + "topic1", + 11, + 2 + ); + List actualMessages = FirehoseMessageUtils.convertToOdpfMessage(new ArrayList() {{ + add(m1); + add(m2); + }}); + Assert.assertEquals(2, actualMessages.size()); + OdpfMessage expectedMessage1 = new OdpfMessage( + "testKey".getBytes(), + "testMessage".getBytes(), + new Tuple<>("message_topic", "topic"), + new Tuple<>("message_partition", 12), + new Tuple<>("message_offset", 1L), + new Tuple<>("message_headers", null), + new Tuple<>("message_timestamp", 0L), + new Tuple<>("load_time", 0L)); + OdpfMessage expectedMessage2 = new OdpfMessage( + "testKey1".getBytes(), + "testMessage1".getBytes(), + new Tuple<>("message_topic", "topic1"), + new Tuple<>("message_partition", 11), + new Tuple<>("message_offset", 2L), + new Tuple<>("message_headers", null), + new Tuple<>("message_timestamp", 0L), + new Tuple<>("load_time", 0L)); + + OdpfMessage actualMessage1 = actualMessages.get(0); + OdpfMessage actualMessage2 = actualMessages.get(1); + + Assert.assertTrue(Arrays.equals((byte[]) expectedMessage1.getLogKey(), (byte[]) actualMessage1.getLogKey())); + Assert.assertTrue(Arrays.equals((byte[]) expectedMessage1.getLogMessage(), (byte[]) actualMessage1.getLogMessage())); + Assert.assertEquals(expectedMessage1.getMetadata(), actualMessage1.getMetadata()); + + Assert.assertTrue(Arrays.equals((byte[]) expectedMessage2.getLogKey(), (byte[]) actualMessage2.getLogKey())); + Assert.assertTrue(Arrays.equals((byte[]) expectedMessage2.getLogMessage(), (byte[]) actualMessage2.getLogMessage())); + Assert.assertEquals(expectedMessage2.getMetadata(), actualMessage2.getMetadata()); + } + + @Test + public void shouldReturnEmptyList() { + List actualMessages = FirehoseMessageUtils.convertToOdpfMessage(new ArrayList<>()); + Assert.assertEquals(Collections.emptyList(), actualMessages); + } +} diff --git a/src/test/java/io/odpf/firehose/message/MessageTest.java b/src/test/java/io/odpf/firehose/message/MessageTest.java index e4daf1eda..71744f48b 100644 --- a/src/test/java/io/odpf/firehose/message/MessageTest.java +++ b/src/test/java/io/odpf/firehose/message/MessageTest.java @@ 
-1,8 +1,8 @@ package io.odpf.firehose.message; +import io.odpf.depot.error.ErrorType; import io.odpf.firehose.consumer.TestKey; import io.odpf.firehose.consumer.TestMessage; -import io.odpf.firehose.error.ErrorType; import io.odpf.firehose.exception.DefaultException; import org.junit.Assert; import org.junit.Before; diff --git a/src/test/java/io/odpf/firehose/metrics/InstrumentationTest.java b/src/test/java/io/odpf/firehose/metrics/FirehoseInstrumentationTest.java similarity index 67% rename from src/test/java/io/odpf/firehose/metrics/InstrumentationTest.java rename to src/test/java/io/odpf/firehose/metrics/FirehoseInstrumentationTest.java index 9a31d9d05..0c898f517 100644 --- a/src/test/java/io/odpf/firehose/metrics/InstrumentationTest.java +++ b/src/test/java/io/odpf/firehose/metrics/FirehoseInstrumentationTest.java @@ -1,12 +1,13 @@ package io.odpf.firehose.metrics; +import io.odpf.depot.metrics.StatsDReporter; import io.odpf.firehose.message.Message; import org.junit.Assert; import org.junit.Before; import org.junit.Test; import org.junit.runner.RunWith; import org.mockito.Mock; -import org.mockito.runners.MockitoJUnitRunner; +import org.mockito.junit.MockitoJUnitRunner; import org.slf4j.Logger; import java.io.IOException; @@ -20,7 +21,7 @@ import static org.mockito.Mockito.*; @RunWith(MockitoJUnitRunner.class) -public class InstrumentationTest { +public class FirehoseInstrumentationTest { @Mock private StatsDReporter statsDReporter; @Mock @@ -28,14 +29,14 @@ public class InstrumentationTest { @Mock private Message message; - private Instrumentation instrumentation; + private FirehoseInstrumentation firehoseInstrumentation; private String testMessage; private String testTemplate; private Exception e; @Before public void setUp() { - instrumentation = new Instrumentation(statsDReporter, logger); + firehoseInstrumentation = new FirehoseInstrumentation(statsDReporter, logger); testMessage = "test"; testTemplate = "test: {},{},{}"; e = new Exception(); @@ -43,57 +44,57 @@ public void setUp() { @Test public void shouldLogString() { - instrumentation.logInfo(testMessage); - verify(logger, times(1)).info(testMessage); + firehoseInstrumentation.logInfo(testMessage); + verify(logger, times(1)).info(testMessage, new Object[0]); } @Test public void shouldLogStringTemplate() { - instrumentation.logInfo(testTemplate, 1, 2, 3); + firehoseInstrumentation.logInfo(testTemplate, 1, 2, 3); verify(logger, times(1)).info(testTemplate, 1, 2, 3); } @Test public void shouldLogWarnStringTemplate() { - instrumentation.logWarn(testTemplate, 1, 2, 3); + firehoseInstrumentation.logWarn(testTemplate, 1, 2, 3); verify(logger, times(1)).warn(testTemplate, 1, 2, 3); } @Test public void shouldLogDebugStringTemplate() { - instrumentation.logDebug(testTemplate, 1, 2, 3); + firehoseInstrumentation.logDebug(testTemplate, 1, 2, 3); verify(logger, times(1)).debug(testTemplate, 1, 2, 3); } @Test public void shouldLogErrorStringTemplate() { - instrumentation.logError(testTemplate, 1, 2, 3); + firehoseInstrumentation.logError(testTemplate, 1, 2, 3); verify(logger, times(1)).error(testTemplate, 1, 2, 3); } @Test public void shouldCapturePulledMessageHistogram() { - instrumentation.capturePulledMessageHistogram(1); + firehoseInstrumentation.capturePulledMessageHistogram(1); verify(statsDReporter, times(1)).captureHistogram(Metrics.SOURCE_KAFKA_PULL_BATCH_SIZE_TOTAL, 1); } @Test public void shouldCaptureFilteredMessageCount() { - instrumentation.captureFilteredMessageCount(1); - verify(statsDReporter, 
times(1)).captureCount(Metrics.SOURCE_KAFKA_MESSAGES_FILTER_TOTAL, 1); + firehoseInstrumentation.captureFilteredMessageCount(1); + verify(statsDReporter, times(1)).captureCount(Metrics.SOURCE_KAFKA_MESSAGES_FILTER_TOTAL, 1L); } @Test public void shouldCaptureNonFatalErrorWithStringMessage() { - instrumentation.captureNonFatalError(e, testMessage); - verify(logger, times(1)).warn(testMessage); + firehoseInstrumentation.captureNonFatalError("firehose_error_event", e, testMessage); + verify(logger, times(1)).warn(testMessage, new Object[0]); verify(logger, times(1)).warn(e.getMessage(), e); verify(statsDReporter, times(1)).recordEvent(Metrics.ERROR_EVENT, Metrics.NON_FATAL_ERROR, Metrics.ERROR_MESSAGE_CLASS_TAG + "=" + e.getClass().getName() + ",type=" + Metrics.NON_FATAL_ERROR); } @Test public void shouldCaptureNonFatalErrorWithStringTemplate() { - instrumentation.captureNonFatalError(e, testTemplate, 1, 2, 3); + firehoseInstrumentation.captureNonFatalError("firehose_error_event", e, testTemplate, 1, 2, 3); verify(logger, times(1)).warn(testTemplate, 1, 2, 3); verify(logger, times(1)).warn(e.getMessage(), e); verify(statsDReporter, times(1)).recordEvent(Metrics.ERROR_EVENT, Metrics.NON_FATAL_ERROR, Metrics.ERROR_MESSAGE_CLASS_TAG + "=" + e.getClass().getName() + ",type=" + Metrics.NON_FATAL_ERROR); @@ -101,15 +102,15 @@ public void shouldCaptureNonFatalErrorWithStringTemplate() { @Test public void shouldCaptureFatalErrorWithStringMessage() { - instrumentation.captureFatalError(e, testMessage); - verify(logger, times(1)).error(testMessage); + firehoseInstrumentation.captureFatalError("firehose_error_event", e, testMessage); + verify(logger, times(1)).error(testMessage, new Object[0]); verify(logger, times(1)).error(e.getMessage(), e); verify(statsDReporter, times(1)).recordEvent(Metrics.ERROR_EVENT, Metrics.FATAL_ERROR, Metrics.ERROR_MESSAGE_CLASS_TAG + "=" + e.getClass().getName() + ",type=" + Metrics.FATAL_ERROR); } @Test public void shouldCaptureFatalErrorWithStringTemplate() { - instrumentation.captureFatalError(e, testTemplate, 1, 2, 3); + firehoseInstrumentation.captureFatalError("firehose_error_event", e, testTemplate, 1, 2, 3); verify(logger, times(1)).error(testTemplate, 1, 2, 3); verify(logger, times(1)).error(e.getMessage(), e); verify(statsDReporter, times(1)).recordEvent(Metrics.ERROR_EVENT, Metrics.FATAL_ERROR, Metrics.ERROR_MESSAGE_CLASS_TAG + "=" + e.getClass().getName() + ",type=" + Metrics.FATAL_ERROR); @@ -117,40 +118,40 @@ public void shouldCaptureFatalErrorWithStringTemplate() { @Test public void shouldSetStartExecutionTime() { - instrumentation.startExecution(); - Assert.assertEquals(instrumentation.getStartExecutionTime().getEpochSecond(), java.time.Instant.now().getEpochSecond()); + firehoseInstrumentation.startExecution(); + Assert.assertEquals(firehoseInstrumentation.getStartExecutionTime().getEpochSecond(), java.time.Instant.now().getEpochSecond()); } @Test public void shouldReturnStartExecutionTime() { - Instant time = instrumentation.startExecution(); - Assert.assertEquals(instrumentation.getStartExecutionTime().getEpochSecond(), time.getEpochSecond()); + Instant time = firehoseInstrumentation.startExecution(); + Assert.assertEquals(firehoseInstrumentation.getStartExecutionTime().getEpochSecond(), time.getEpochSecond()); } @Test public void shouldCaptureLifetimeTillSink() { List messages = Collections.singletonList(message); - instrumentation.capturePreExecutionLatencies(messages); + firehoseInstrumentation.capturePreExecutionLatencies(messages); 
verify(statsDReporter, times(messages.size())).captureDurationSince("firehose_pipeline_execution_lifetime_milliseconds", Instant.ofEpochSecond(message.getTimestamp())); } @Test public void shouldCaptureGlobalMetrics() { - instrumentation.captureGlobalMessageMetrics(Metrics.MessageScope.CONSUMER, 1); - verify(statsDReporter, times(1)).captureCount(GLOBAL_MESSAGES_TOTAL, 1, String.format(MESSAGE_SCOPE_TAG, Metrics.MessageScope.CONSUMER)); + firehoseInstrumentation.captureGlobalMessageMetrics(Metrics.MessageScope.CONSUMER, 1); + verify(statsDReporter, times(1)).captureCount(GLOBAL_MESSAGES_TOTAL, 1L, String.format(MESSAGE_SCOPE_TAG, Metrics.MessageScope.CONSUMER)); } @Test public void shouldCaptureLatencyAcrossFirehose() { List messages = Collections.singletonList(message); - instrumentation.capturePreExecutionLatencies(messages); + firehoseInstrumentation.capturePreExecutionLatencies(messages); verify(statsDReporter, times(messages.size())).captureDurationSince("firehose_pipeline_end_latency_milliseconds", Instant.ofEpochSecond(message.getConsumeTimestamp())); } @Test public void shouldCapturePartitionProcessTime() { Instant instant = Instant.now(); - instrumentation.captureDurationSince(Metrics.SOURCE_KAFKA_PARTITIONS_PROCESS_TIME_MILLISECONDS, instant); + firehoseInstrumentation.captureDurationSince(Metrics.SOURCE_KAFKA_PARTITIONS_PROCESS_TIME_MILLISECONDS, instant); verify(statsDReporter, times(1)).captureDurationSince(Metrics.SOURCE_KAFKA_PARTITIONS_PROCESS_TIME_MILLISECONDS, instant); } @@ -158,7 +159,7 @@ public void shouldCapturePartitionProcessTime() { public void shouldCaptureBackoffSleepTime() { String metric = "firehose_retry_backoff_sleep_milliseconds"; int sleepTime = 10000; - instrumentation.captureSleepTime(metric, sleepTime); + firehoseInstrumentation.captureSleepTime(metric, sleepTime); verify(statsDReporter, times(1)).gauge(metric, sleepTime); } @@ -167,28 +168,28 @@ public void shouldCaptureCountWithTags() { String metric = "test_metric"; String urlTag = "url=test"; String httpCodeTag = "status_code=200"; - instrumentation.captureCount(metric, 1, httpCodeTag, urlTag); - verify(statsDReporter, times(1)).captureCount(metric, 1, httpCodeTag, urlTag); + firehoseInstrumentation.captureCount(metric, 1L, httpCodeTag, urlTag); + verify(statsDReporter, times(1)).captureCount(metric, 1L, httpCodeTag, urlTag); } @Test public void shouldIncrementCounterWithTags() { String metric = "test_metric"; String httpCodeTag = "status_code=200"; - instrumentation.incrementCounter(metric, httpCodeTag); + firehoseInstrumentation.incrementCounter(metric, httpCodeTag); verify(statsDReporter, times(1)).increment(metric, httpCodeTag); } @Test public void shouldIncrementCounter() { String metric = "test_metric"; - instrumentation.incrementCounter(metric); + firehoseInstrumentation.incrementCounter(metric); verify(statsDReporter, times(1)).increment(metric); } @Test public void shouldClose() throws IOException { - instrumentation.close(); + firehoseInstrumentation.close(); verify(statsDReporter, times(1)).close(); } } diff --git a/src/test/java/io/odpf/firehose/serializer/MessageToJsonTest.java b/src/test/java/io/odpf/firehose/serializer/MessageToJsonTest.java index 852c0a0b9..c14689ca6 100644 --- a/src/test/java/io/odpf/firehose/serializer/MessageToJsonTest.java +++ b/src/test/java/io/odpf/firehose/serializer/MessageToJsonTest.java @@ -1,8 +1,5 @@ package io.odpf.firehose.serializer; - - - import io.odpf.firehose.message.Message; import io.odpf.firehose.consumer.TestAggregatedSupplyMessage; import 
io.odpf.firehose.exception.DeserializerException; diff --git a/src/test/java/io/odpf/firehose/serializer/MessageToTemplatizedJsonTest.java b/src/test/java/io/odpf/firehose/serializer/MessageToTemplatizedJsonTest.java index d1204b39c..d70b984ac 100644 --- a/src/test/java/io/odpf/firehose/serializer/MessageToTemplatizedJsonTest.java +++ b/src/test/java/io/odpf/firehose/serializer/MessageToTemplatizedJsonTest.java @@ -7,7 +7,7 @@ import io.odpf.firehose.consumer.TestAggregatedSupplyMessage; import io.odpf.firehose.exception.DeserializerException; import io.odpf.firehose.exception.ConfigurationException; -import io.odpf.firehose.metrics.Instrumentation; +import io.odpf.firehose.metrics.FirehoseInstrumentation; import io.odpf.stencil.StencilClientFactory; import io.odpf.stencil.client.StencilClient; import io.odpf.stencil.Parser; @@ -34,7 +34,7 @@ public class MessageToTemplatizedJsonTest { public ExpectedException expectedException = ExpectedException.none(); @Mock - private Instrumentation instrumentation; + private FirehoseInstrumentation firehoseInstrumentation; @Mock private Parser protoParser; @@ -55,7 +55,7 @@ public void shouldProperlySerializeMessageToTemplateWithSingleUnknownField() { StencilClient stencilClient = StencilClientFactory.getClient(); protoParser = stencilClient.getParser(TestAggregatedSupplyMessage.class.getName()); MessageToTemplatizedJson messageToTemplatizedJson = MessageToTemplatizedJson - .create(instrumentation, template, protoParser); + .create(firehoseInstrumentation, template, protoParser); Message message = new Message(Base64.getDecoder().decode(logKey.getBytes()), Base64.getDecoder().decode(logMessage.getBytes()), "sample-topic", 0, 100); @@ -70,7 +70,7 @@ public void shouldProperlySerializeMessageToTemplateWithAsItIs() { StencilClient stencilClient = StencilClientFactory.getClient(); protoParser = stencilClient.getParser(TestAggregatedSupplyMessage.class.getName()); MessageToTemplatizedJson messageToTemplatizedJson = MessageToTemplatizedJson - .create(instrumentation, template, protoParser); + .create(firehoseInstrumentation, template, protoParser); Message message = new Message(Base64.getDecoder().decode(logKey.getBytes()), Base64.getDecoder().decode(logMessage.getBytes()), "sample-topic", 0, 100); @@ -95,7 +95,7 @@ public void shouldThrowIfNoPathsFoundInTheProto() { StencilClient stencilClient = StencilClientFactory.getClient(); protoParser = stencilClient.getParser(TestAggregatedSupplyMessage.class.getName()); MessageToTemplatizedJson messageToTemplatizedJson = MessageToTemplatizedJson - .create(instrumentation, template, protoParser); + .create(firehoseInstrumentation, template, protoParser); Message message = new Message(Base64.getDecoder().decode(logKey.getBytes()), Base64.getDecoder().decode(logMessage.getBytes()), "sample-topic", 0, 100); @@ -108,7 +108,7 @@ public void shouldFailForNonJsonTemplate() { expectedException.expectMessage("must be a valid JSON."); String template = "{\"test:\"$.routes[0]\", \"$.order_number\" : \"xxx\"}"; - MessageToTemplatizedJson.create(instrumentation, template, protoParser); + MessageToTemplatizedJson.create(firehoseInstrumentation, template, protoParser); } @@ -118,7 +118,7 @@ public void shouldDoRegexMatchingToReplaceThingsFromProtobuf() { expectedException.expectMessage("must be a valid JSON."); String template = "{\"test:\"$.routes[0]\", \"$.order_number\" : \"xxx\"}"; - MessageToTemplatizedJson.create(instrumentation, template, protoParser); + MessageToTemplatizedJson.create(firehoseInstrumentation, template, 
protoParser); } @Test @@ -136,8 +136,8 @@ public void shouldLogPaths() { StencilClient stencilClient = StencilClientFactory.getClient(); protoParser = stencilClient.getParser(TestAggregatedSupplyMessage.class.getName()); - MessageToTemplatizedJson.create(instrumentation, template, protoParser); + MessageToTemplatizedJson.create(firehoseInstrumentation, template, protoParser); - Mockito.verify(instrumentation, Mockito.times(1)).logDebug("\nPaths: {}", pathList); + Mockito.verify(firehoseInstrumentation, Mockito.times(1)).logDebug("\nPaths: {}", pathList); } } diff --git a/src/test/java/io/odpf/firehose/sink/AbstractSinkTest.java b/src/test/java/io/odpf/firehose/sink/AbstractSinkTest.java index 65b8fc609..69bddaf94 100644 --- a/src/test/java/io/odpf/firehose/sink/AbstractSinkTest.java +++ b/src/test/java/io/odpf/firehose/sink/AbstractSinkTest.java @@ -1,10 +1,10 @@ package io.odpf.firehose.sink; +import io.odpf.depot.error.ErrorInfo; +import io.odpf.depot.error.ErrorType; import io.odpf.firehose.message.Message; -import io.odpf.firehose.error.ErrorInfo; -import io.odpf.firehose.error.ErrorType; import io.odpf.firehose.exception.DeserializerException; -import io.odpf.firehose.metrics.Instrumentation; +import io.odpf.firehose.metrics.FirehoseInstrumentation; import io.odpf.firehose.metrics.Metrics; import org.junit.Assert; import org.junit.Test; @@ -17,13 +17,12 @@ import java.util.ArrayList; import java.util.List; -import static org.mockito.ArgumentMatchers.any; -import static org.mockito.Mockito.when; +import static org.mockito.Mockito.*; public class AbstractSinkTest { private static class TestSink extends AbstractSink { - TestSink(Instrumentation instrumentation, String sinkType) { - super(instrumentation, sinkType); + TestSink(FirehoseInstrumentation firehoseInstrumentation, String sinkType) { + super(firehoseInstrumentation, sinkType); } private final List failedMessages = new ArrayList<>(); @@ -53,7 +52,7 @@ public void close() throws IOException { } @Mock - private Instrumentation instrumentation = Mockito.mock(Instrumentation.class); + private FirehoseInstrumentation firehoseInstrumentation = Mockito.mock(FirehoseInstrumentation.class); private Message createMessage(String topic, String key, String value) { return new Message(key.getBytes(), value.getBytes(), topic, 0, 0); @@ -61,8 +60,8 @@ private Message createMessage(String topic, String key, String value) { @Test public void shouldProcessMessages() { - when(instrumentation.startExecution()).thenReturn(Instant.now()); - TestSink sink = new TestSink(instrumentation, "TestSink"); + when(firehoseInstrumentation.startExecution()).thenReturn(Instant.now()); + TestSink sink = new TestSink(firehoseInstrumentation, "TestSink"); Message m1 = createMessage("test", "test", "test1"); Message m2 = createMessage("test", "test", "test2"); Message m3 = createMessage("test", "test", "test3"); @@ -74,26 +73,26 @@ public void shouldProcessMessages() { add(m4); }}); Assert.assertEquals(0, failedMessages.size()); - Mockito.verify(instrumentation, Mockito.times(1)).captureMessageMetrics(Metrics.SINK_MESSAGES_TOTAL, Metrics.MessageType.TOTAL, 4); - Mockito.verify(instrumentation, Mockito.times(1)).captureMessageMetrics(Metrics.SINK_MESSAGES_TOTAL, Metrics.MessageType.SUCCESS, 4); - Mockito.verify(instrumentation, Mockito.times(1)).captureGlobalMessageMetrics(Metrics.MessageScope.SINK, 4); - Mockito.verify(instrumentation, Mockito.times(1)).captureMessageBatchSize(4); - Mockito.verify(instrumentation, Mockito.times(1)).logInfo("Preparing {} messages", 4); 
- Mockito.verify(instrumentation, Mockito.times(1)).capturePreExecutionLatencies(new ArrayList() {{ + Mockito.verify(firehoseInstrumentation, Mockito.times(1)).captureMessageMetrics(Metrics.SINK_MESSAGES_TOTAL, Metrics.MessageType.TOTAL, 4); + Mockito.verify(firehoseInstrumentation, Mockito.times(1)).captureMessageMetrics(Metrics.SINK_MESSAGES_TOTAL, Metrics.MessageType.SUCCESS, 4); + Mockito.verify(firehoseInstrumentation, Mockito.times(1)).captureGlobalMessageMetrics(Metrics.MessageScope.SINK, 4); + Mockito.verify(firehoseInstrumentation, Mockito.times(1)).captureMessageBatchSize(4); + Mockito.verify(firehoseInstrumentation, Mockito.times(1)).logInfo("Preparing {} messages", 4); + Mockito.verify(firehoseInstrumentation, Mockito.times(1)).capturePreExecutionLatencies(new ArrayList() {{ add(m1); add(m2); add(m3); add(m4); }}); - Mockito.verify(instrumentation, Mockito.times(1)).startExecution(); - Mockito.verify(instrumentation, Mockito.times(1)).captureSinkExecutionTelemetry("TestSink", 4); - Mockito.verify(instrumentation, Mockito.times(1)).logInfo("Pushed {} messages", 4); + Mockito.verify(firehoseInstrumentation, Mockito.times(1)).startExecution(); + Mockito.verify(firehoseInstrumentation, Mockito.times(1)).captureSinkExecutionTelemetry("TestSink", 4); + Mockito.verify(firehoseInstrumentation, Mockito.times(1)).logInfo("Pushed {} messages", 4); } @Test public void shouldProcessFailedMessages() { - when(instrumentation.startExecution()).thenReturn(Instant.now()); - TestSink sink = new TestSink(instrumentation, "TestSink"); + when(firehoseInstrumentation.startExecution()).thenReturn(Instant.now()); + TestSink sink = new TestSink(firehoseInstrumentation, "TestSink"); Message m1 = createMessage("test", "test", "test1"); Message m2 = createMessage("test", "test", "test2"); Message m3 = createMessage("test", "test", "test3"); @@ -111,32 +110,32 @@ public void shouldProcessFailedMessages() { add(m5); }}); Assert.assertEquals(3, failedMessages.size()); - Mockito.verify(instrumentation, Mockito.times(1)).captureMessageMetrics(Metrics.SINK_MESSAGES_TOTAL, Metrics.MessageType.TOTAL, 5); - Mockito.verify(instrumentation, Mockito.times(1)).captureMessageMetrics(Metrics.SINK_MESSAGES_TOTAL, Metrics.MessageType.SUCCESS, 2); - Mockito.verify(instrumentation, Mockito.times(2)).captureMessageMetrics(Metrics.SINK_MESSAGES_TOTAL, Metrics.MessageType.FAILURE, ErrorType.DEFAULT_ERROR, 1); - Mockito.verify(instrumentation, Mockito.times(1)).captureMessageMetrics(Metrics.SINK_MESSAGES_TOTAL, Metrics.MessageType.FAILURE, ErrorType.DESERIALIZATION_ERROR, 1); - Mockito.verify(instrumentation, Mockito.times(1)).captureGlobalMessageMetrics(Metrics.MessageScope.SINK, 2); - Mockito.verify(instrumentation, Mockito.times(1)).captureMessageBatchSize(5); - Mockito.verify(instrumentation, Mockito.times(1)).logInfo("Preparing {} messages", 5); - Mockito.verify(instrumentation, Mockito.times(1)).capturePreExecutionLatencies(new ArrayList() {{ + Mockito.verify(firehoseInstrumentation, Mockito.times(1)).captureMessageMetrics(Metrics.SINK_MESSAGES_TOTAL, Metrics.MessageType.TOTAL, 5); + Mockito.verify(firehoseInstrumentation, Mockito.times(1)).captureMessageMetrics(Metrics.SINK_MESSAGES_TOTAL, Metrics.MessageType.SUCCESS, 2); + Mockito.verify(firehoseInstrumentation, Mockito.times(2)).captureMessageMetrics(Metrics.SINK_MESSAGES_TOTAL, Metrics.MessageType.FAILURE, ErrorType.DEFAULT_ERROR, 1); + Mockito.verify(firehoseInstrumentation, Mockito.times(1)).captureMessageMetrics(Metrics.SINK_MESSAGES_TOTAL, 
Metrics.MessageType.FAILURE, ErrorType.DESERIALIZATION_ERROR, 1); + Mockito.verify(firehoseInstrumentation, Mockito.times(1)).captureGlobalMessageMetrics(Metrics.MessageScope.SINK, 2); + Mockito.verify(firehoseInstrumentation, Mockito.times(1)).captureMessageBatchSize(5); + Mockito.verify(firehoseInstrumentation, Mockito.times(1)).logInfo("Preparing {} messages", 5); + Mockito.verify(firehoseInstrumentation, Mockito.times(1)).capturePreExecutionLatencies(new ArrayList() {{ add(m1); add(m2); add(m3); add(m4); add(m5); }}); - Mockito.verify(instrumentation, Mockito.times(1)).startExecution(); - Mockito.verify(instrumentation, Mockito.times(1)).captureSinkExecutionTelemetry("TestSink", 5); - Mockito.verify(instrumentation, Mockito.times(1)).logInfo("Pushed {} messages", 2); - Mockito.verify(instrumentation, Mockito.times(1)).logError("Failed to Push {} messages to sink ", 3); - Mockito.verify(instrumentation, Mockito.times(1)).captureErrorMetrics(ErrorType.DESERIALIZATION_ERROR); - Mockito.verify(instrumentation, Mockito.times(2)).captureErrorMetrics(ErrorType.DEFAULT_ERROR); + Mockito.verify(firehoseInstrumentation, Mockito.times(1)).startExecution(); + Mockito.verify(firehoseInstrumentation, Mockito.times(1)).captureSinkExecutionTelemetry("TestSink", 5); + Mockito.verify(firehoseInstrumentation, Mockito.times(1)).logInfo("Pushed {} messages", 2); + Mockito.verify(firehoseInstrumentation, Mockito.times(1)).logError("Failed to Push {} messages to sink ", 3); + Mockito.verify(firehoseInstrumentation, Mockito.times(1)).captureErrorMetrics(ErrorType.DESERIALIZATION_ERROR); + Mockito.verify(firehoseInstrumentation, Mockito.times(2)).captureErrorMetrics(ErrorType.DEFAULT_ERROR); } @Test public void shouldProcessException() { - when(instrumentation.startExecution()).thenReturn(Instant.now()); - TestSink sink = new TestSink(instrumentation, "TestSink"); + when(firehoseInstrumentation.startExecution()).thenReturn(Instant.now()); + TestSink sink = new TestSink(firehoseInstrumentation, "TestSink"); Message m1 = createMessage("test", "test", "test1"); Message m2 = createMessage("test", "test", "test2"); Message m3 = createMessage("test", "test", "test3"); @@ -150,25 +149,25 @@ public void shouldProcessException() { }}); Assert.assertEquals(4, failedMessages.size()); - Mockito.verify(instrumentation, Mockito.times(1)).captureMessageMetrics(Metrics.SINK_MESSAGES_TOTAL, Metrics.MessageType.TOTAL, 4); - Mockito.verify(instrumentation, Mockito.times(4)).captureMessageMetrics(Metrics.SINK_MESSAGES_TOTAL, Metrics.MessageType.FAILURE, ErrorType.DEFAULT_ERROR, 1); - Mockito.verify(instrumentation, Mockito.times(1)).captureMessageBatchSize(4); - Mockito.verify(instrumentation, Mockito.times(1)).logInfo("Preparing {} messages", 4); - Mockito.verify(instrumentation, Mockito.times(1)).capturePreExecutionLatencies(new ArrayList() {{ + Mockito.verify(firehoseInstrumentation, Mockito.times(1)).captureMessageMetrics(Metrics.SINK_MESSAGES_TOTAL, Metrics.MessageType.TOTAL, 4); + Mockito.verify(firehoseInstrumentation, Mockito.times(4)).captureMessageMetrics(Metrics.SINK_MESSAGES_TOTAL, Metrics.MessageType.FAILURE, ErrorType.DEFAULT_ERROR, 1); + Mockito.verify(firehoseInstrumentation, Mockito.times(1)).captureMessageBatchSize(4); + Mockito.verify(firehoseInstrumentation, Mockito.times(1)).logInfo("Preparing {} messages", 4); + Mockito.verify(firehoseInstrumentation, Mockito.times(1)).capturePreExecutionLatencies(new ArrayList() {{ add(m1); add(m2); add(m3); add(m4); }}); - Mockito.verify(instrumentation, 
Mockito.times(1)).startExecution(); - Mockito.verify(instrumentation, Mockito.times(1)).captureSinkExecutionTelemetry("TestSink", 4); - Mockito.verify(instrumentation, Mockito.times(1)).logError("Failed to Push {} messages to sink ", 4); - Mockito.verify(instrumentation, Mockito.times(4)).captureErrorMetrics(ErrorType.DEFAULT_ERROR); + Mockito.verify(firehoseInstrumentation, Mockito.times(1)).startExecution(); + Mockito.verify(firehoseInstrumentation, Mockito.times(1)).captureSinkExecutionTelemetry("TestSink", 4); + Mockito.verify(firehoseInstrumentation, Mockito.times(1)).logError("Failed to Push {} messages to sink ", 4); + Mockito.verify(firehoseInstrumentation, Mockito.times(4)).captureErrorMetrics(ErrorType.DEFAULT_ERROR); } @Test(expected = DeserializerException.class) public void shouldProcessExceptionInPrepare() { - TestSink sink = new TestSink(instrumentation, "TestSink"); + TestSink sink = new TestSink(firehoseInstrumentation, "TestSink"); Message m1 = createMessage("test", "test", "test1"); Message m2 = createMessage("test", "test", "test2"); Message m3 = createMessage("test", "test", "test3"); @@ -184,7 +183,7 @@ public void shouldProcessExceptionInPrepare() { @Test public void shouldNotCaptureSinkExecutionTelemetry() { - TestSink sink = new TestSink(instrumentation, "TestSink"); + TestSink sink = new TestSink(firehoseInstrumentation, "TestSink"); Message m1 = createMessage("test", "test", "test1"); Message m2 = createMessage("test", "test", "test2"); Message m3 = createMessage("test", "test", "test3"); @@ -198,7 +197,7 @@ public void shouldNotCaptureSinkExecutionTelemetry() { add(m4); }}); } catch (Exception e) { - Mockito.verify(instrumentation, Mockito.times(0)).captureSinkExecutionTelemetry(any(), any()); + Mockito.verify(firehoseInstrumentation, Mockito.times(0)).captureSinkExecutionTelemetry(any(), any()); } } diff --git a/src/test/java/io/odpf/firehose/sink/GenericOdpfSinkTest.java b/src/test/java/io/odpf/firehose/sink/GenericOdpfSinkTest.java new file mode 100644 index 000000000..1203a9130 --- /dev/null +++ b/src/test/java/io/odpf/firehose/sink/GenericOdpfSinkTest.java @@ -0,0 +1,85 @@ +package io.odpf.firehose.sink; + +import io.odpf.depot.OdpfSink; +import io.odpf.depot.OdpfSinkResponse; +import io.odpf.depot.error.ErrorInfo; +import io.odpf.depot.error.ErrorType; +import io.odpf.firehose.message.Message; +import io.odpf.firehose.metrics.FirehoseInstrumentation; +import org.junit.Assert; +import org.junit.Before; +import org.junit.Test; +import org.mockito.Mockito; + +import java.util.ArrayList; +import java.util.Collections; +import java.util.List; + +public class GenericOdpfSinkTest { + + private FirehoseInstrumentation instrumentation; + private OdpfSink odpfSink; + + @Before + public void setup() { + instrumentation = Mockito.mock(FirehoseInstrumentation.class); + odpfSink = Mockito.mock(OdpfSink.class); + } + + @Test + public void shouldReturnEmptyListOfMessagesWhenSuccess() throws Exception { + GenericOdpfSink sink = new GenericOdpfSink(instrumentation, "test", odpfSink); + Mockito.when(odpfSink.pushToSink(Mockito.anyList())).thenReturn(new OdpfSinkResponse()); + List messages = new ArrayList() {{ + Message m1 = new Message(new byte[1], new byte[1], "test", 1, 1); + Message m2 = new Message(new byte[1], new byte[1], "test", 1, 2); + Message m3 = new Message(new byte[1], new byte[1], "test", 1, 3); + Message m4 = new Message(new byte[1], new byte[1], "test", 1, 4); + Message m5 = new Message(new byte[1], new byte[1], "test", 1, 5); + Message m6 = new Message(new 
byte[1], new byte[1], "test", 1, 6); + add(m1); + add(m2); + add(m3); + add(m4); + add(m5); + add(m6); + }}; + sink.prepare(messages); + List failedMessages = sink.execute(); + Assert.assertEquals(Collections.emptyList(), failedMessages); + } + + @Test + public void shouldReturnFailedMessages() throws Exception { + GenericOdpfSink sink = new GenericOdpfSink(instrumentation, "test", odpfSink); + OdpfSinkResponse response = new OdpfSinkResponse(); + response.addErrors(5, new ErrorInfo(new Exception(), ErrorType.SINK_4XX_ERROR)); + response.addErrors(2, new ErrorInfo(new Exception(), ErrorType.DEFAULT_ERROR)); + response.addErrors(4, new ErrorInfo(new Exception(), ErrorType.DESERIALIZATION_ERROR)); + Mockito.when(odpfSink.pushToSink(Mockito.anyList())).thenReturn(response); + Message m1 = new Message(new byte[1], new byte[1], "test", 1, 1); + Message m2 = new Message(new byte[1], new byte[1], "test", 1, 2); + Message m3 = new Message(new byte[1], new byte[1], "test", 1, 3); + Message m4 = new Message(new byte[1], new byte[1], "test", 1, 4); + Message m5 = new Message(new byte[1], new byte[1], "test", 1, 5); + Message m6 = new Message(new byte[1], new byte[1], "test", 1, 6); + List messages = new ArrayList() {{ + add(m1); + add(m2); + add(m3); + add(m4); + add(m5); + add(m6); + }}; + sink.prepare(messages); + List failedMessages = sink.execute(); + Assert.assertEquals(3, failedMessages.size()); + Assert.assertEquals(ErrorType.DEFAULT_ERROR, failedMessages.get(0).getErrorInfo().getErrorType()); + Assert.assertEquals(ErrorType.DESERIALIZATION_ERROR, failedMessages.get(1).getErrorInfo().getErrorType()); + Assert.assertEquals(ErrorType.SINK_4XX_ERROR, failedMessages.get(2).getErrorInfo().getErrorType()); + + Assert.assertEquals(3, failedMessages.get(0).getOffset()); + Assert.assertEquals(5, failedMessages.get(1).getOffset()); + Assert.assertEquals(6, failedMessages.get(2).getOffset()); + } +} diff --git a/src/test/java/io/odpf/firehose/sink/SinkFactoryUtilsTest.java b/src/test/java/io/odpf/firehose/sink/SinkFactoryUtilsTest.java new file mode 100644 index 000000000..3cf07d78f --- /dev/null +++ b/src/test/java/io/odpf/firehose/sink/SinkFactoryUtilsTest.java @@ -0,0 +1,24 @@ +package io.odpf.firehose.sink; + +import org.junit.Assert; +import org.junit.Test; + +import java.util.HashMap; +import java.util.Map; + +public class SinkFactoryUtilsTest { + + @Test + public void shouldAddSinkConnectorConfigs() { + Map env = new HashMap() {{ + put("INPUT_SCHEMA_PROTO_CLASS", "com.test.SomeProtoClass"); + put("INPUT_SCHEMA_PROTO_ALLOW_UNKNOWN_FIELDS_ENABLE", "true"); + }}; + Map configs = SinkFactoryUtils.addAdditionalConfigsForSinkConnectors(env); + Assert.assertEquals("com.test.SomeProtoClass", configs.get("SINK_CONNECTOR_SCHEMA_MESSAGE_CLASS")); + Assert.assertEquals("com.test.SomeProtoClass", configs.get("SINK_CONNECTOR_SCHEMA_KEY_CLASS")); + Assert.assertEquals("firehose_", configs.get("SINK_METRICS_APPLICATION_PREFIX")); + Assert.assertEquals("true", configs.get("SINK_CONNECTOR_SCHEMA_PROTO_ALLOW_UNKNOWN_FIELDS_ENABLE")); + Assert.assertEquals("LOG_MESSAGE", configs.get("SINK_CONNECTOR_SCHEMA_MESSAGE_MODE")); + } +} diff --git a/src/test/java/io/odpf/firehose/sink/bigquery/BigQuerySinkTest.java b/src/test/java/io/odpf/firehose/sink/bigquery/BigQuerySinkTest.java deleted file mode 100644 index 8344878db..000000000 --- a/src/test/java/io/odpf/firehose/sink/bigquery/BigQuerySinkTest.java +++ /dev/null @@ -1,210 +0,0 @@ -package io.odpf.firehose.sink.bigquery; - -import 
com.google.cloud.bigquery.BigQueryError; -import com.google.cloud.bigquery.InsertAllRequest; -import com.google.cloud.bigquery.InsertAllResponse; -import com.google.cloud.bigquery.TableId; -import io.odpf.firehose.TestMessageBQ; -import io.odpf.firehose.message.Message; -import io.odpf.firehose.error.ErrorInfo; -import io.odpf.firehose.error.ErrorType; -import io.odpf.firehose.metrics.Instrumentation; -import io.odpf.firehose.sink.bigquery.converter.MessageRecordConverter; -import io.odpf.firehose.sink.bigquery.converter.MessageRecordConverterCache; -import io.odpf.firehose.sink.bigquery.exception.BigQuerySinkException; -import io.odpf.firehose.sink.bigquery.handler.BigQueryClient; -import io.odpf.firehose.sink.bigquery.handler.BigQueryRow; -import io.odpf.firehose.sink.bigquery.handler.BigQueryRowWithInsertId; -import io.odpf.firehose.sink.bigquery.models.Record; -import io.odpf.firehose.sink.bigquery.models.Records; -import org.aeonbits.owner.util.Collections; -import org.junit.Assert; -import org.junit.Before; -import org.junit.Test; -import org.mockito.Mock; -import org.mockito.Mockito; -import org.mockito.MockitoAnnotations; - -import java.time.Instant; -import java.util.HashMap; -import java.util.List; -import java.util.Map; - - -public class BigQuerySinkTest { - - private final MessageUtils util = new MessageUtils(); - private final TableId tableId = TableId.of("test_dataset", "test_table"); - private final MessageRecordConverterCache converterCache = new MessageRecordConverterCache(); - private final BigQueryRow rowCreator = new BigQueryRowWithInsertId(); - @Mock - private BigQueryClient client; - @Mock - private Instrumentation instrumentation; - @Mock - private MessageRecordConverter converter; - private BigQuerySink sink; - @Mock - private InsertAllResponse response; - - @Before - public void setup() { - MockitoAnnotations.initMocks(this); - this.converterCache.setMessageRecordConverter(converter); - this.sink = new BigQuerySink(instrumentation, "BIGQUERY", client, converterCache, rowCreator); - Mockito.when(client.getTableID()).thenReturn(tableId); - } - - @Test - public void shouldPushToBigQuerySink() throws Exception { - OffsetInfo record1Offset = new OffsetInfo("topic1", 1, 101, Instant.now().toEpochMilli()); - OffsetInfo record2Offset = new OffsetInfo("topic1", 2, 102, Instant.now().toEpochMilli()); - OffsetInfo record3Offset = new OffsetInfo("topic1", 3, 103, Instant.now().toEpochMilli()); - OffsetInfo record4Offset = new OffsetInfo("topic1", 4, 104, Instant.now().toEpochMilli()); - OffsetInfo record5Offset = new OffsetInfo("topic1", 5, 104, Instant.now().toEpochMilli()); - OffsetInfo record6Offset = new OffsetInfo("topic1", 6, 104, Instant.now().toEpochMilli()); - Message message1 = util.withOffsetInfo(record1Offset).createConsumerRecord("order-1", "order-url-1", "order-details-1"); - Message message2 = util.withOffsetInfo(record2Offset).createConsumerRecord("order-2", "order-url-2", "order-details-2"); - Message message3 = util.withOffsetInfo(record3Offset).createConsumerRecord("order-3", "order-url-3", "order-details-3"); - Message message4 = util.withOffsetInfo(record4Offset).createConsumerRecord("order-4", "order-url-4", "order-details-4"); - Message message5 = util.withOffsetInfo(record5Offset).createConsumerRecord("order-5", "order-url-5", "order-details-5"); - Message message6 = util.withOffsetInfo(record6Offset).createConsumerRecord("order-6", "order-url-6", "order-details-6"); - List messages = Collections.list(message1, message2, message3, message4, message5, 
message6); - sink.prepare(messages); - Record record1 = new Record(message1, new HashMap<>()); - Record record2 = new Record(message2, new HashMap<>()); - Record record3 = new Record(message3, new HashMap<>()); - Record record4 = new Record(message4, new HashMap<>()); - Record record5 = new Record(message5, new HashMap<>()); - Record record6 = new Record(message6, new HashMap<>()); - Records records = new Records(Collections.list(record1, record2, record3, record4, record5, record6), java.util.Collections.emptyList()); - - InsertAllRequest.Builder builder = InsertAllRequest.newBuilder(client.getTableID()); - records.getValidRecords().forEach((Record m) -> builder.addRow(rowCreator.of(m))); - InsertAllRequest rows = builder.build(); - Mockito.when(converter.convert(Mockito.eq(messages), Mockito.any(Instant.class))).thenReturn(records); - Mockito.when(client.insertAll(rows)).thenReturn(response); - Mockito.when(response.hasErrors()).thenReturn(false); - List invalidMessages = sink.execute(); - Assert.assertEquals(0, invalidMessages.size()); - Mockito.verify(client, Mockito.times(1)).insertAll(rows); - } - - @Test - public void shouldReturnInvalidMessages() throws Exception { - OffsetInfo record1Offset = new OffsetInfo("topic1", 1, 101, Instant.now().toEpochMilli()); - OffsetInfo record2Offset = new OffsetInfo("topic1", 2, 102, Instant.now().toEpochMilli()); - OffsetInfo record3Offset = new OffsetInfo("topic1", 3, 103, Instant.now().toEpochMilli()); - OffsetInfo record4Offset = new OffsetInfo("topic1", 4, 104, Instant.now().toEpochMilli()); - OffsetInfo record5Offset = new OffsetInfo("topic1", 5, 104, Instant.now().toEpochMilli()); - OffsetInfo record6Offset = new OffsetInfo("topic1", 6, 104, Instant.now().toEpochMilli()); - Message message1 = util.withOffsetInfo(record1Offset).createConsumerRecord("order-1", "order-url-1", "order-details-1"); - Message message2 = util.withOffsetInfo(record2Offset).createConsumerRecord("order-2", "order-url-2", "order-details-2"); - Message message3 = util.withOffsetInfo(record3Offset).createConsumerRecord("order-3", "order-url-3", "order-details-3"); - Message message4 = util.withOffsetInfo(record4Offset).createConsumerRecord("order-4", "order-url-4", "order-details-4"); - Message message5 = util.withOffsetInfo(record5Offset).createConsumerRecord("order-5", "order-url-5", "order-details-5"); - Message message6 = util.withOffsetInfo(record6Offset).createConsumerRecord("order-6", "order-url-6", "order-details-6"); - List messages = Collections.list(message1, message2, message3, message4, message5, message6); - sink.prepare(messages); - Record record1 = new Record(message1, new HashMap<>()); - Record record2 = new Record(message2, new HashMap<>()); - Record record3 = new Record(message3, new HashMap<>()); - Record record4 = new Record(message4, new HashMap<>()); - Record record5 = new Record(message5, new HashMap<>()); - Record record6 = new Record(message6, new HashMap<>()); - Records records = new Records(Collections.list(record1, record3, record5, record6), Collections.list(record2, record4)); - - InsertAllRequest.Builder builder = InsertAllRequest.newBuilder(client.getTableID()); - records.getValidRecords().forEach((Record m) -> builder.addRow(rowCreator.of(m))); - InsertAllRequest rows = builder.build(); - Mockito.when(converter.convert(Mockito.eq(messages), Mockito.any(Instant.class))).thenReturn(records); - Mockito.when(client.insertAll(rows)).thenReturn(response); - Mockito.when(response.hasErrors()).thenReturn(false); - List invalidMessages = 
sink.execute(); - Assert.assertEquals(2, invalidMessages.size()); - Mockito.verify(client, Mockito.times(1)).insertAll(rows); - - Assert.assertEquals(TestMessageBQ.newBuilder() - .setOrderNumber("order-2") - .setOrderUrl("order-url-2") - .setOrderDetails("order-details-2") - .build(), TestMessageBQ.parseFrom(invalidMessages.get(0).getLogMessage())); - Assert.assertEquals(TestMessageBQ.newBuilder() - .setOrderNumber("order-4") - .setOrderUrl("order-url-4") - .setOrderDetails("order-details-4") - .build(), TestMessageBQ.parseFrom(invalidMessages.get(1).getLogMessage())); - Assert.assertNull(invalidMessages.get(0).getErrorInfo()); - Assert.assertNull(invalidMessages.get(1).getErrorInfo()); - } - - @Test - public void shouldReturnInvalidMessagesWithFailedInsertMessages() throws Exception { - OffsetInfo record1Offset = new OffsetInfo("topic1", 1, 101, Instant.now().toEpochMilli()); - OffsetInfo record2Offset = new OffsetInfo("topic1", 2, 102, Instant.now().toEpochMilli()); - OffsetInfo record3Offset = new OffsetInfo("topic1", 3, 103, Instant.now().toEpochMilli()); - OffsetInfo record4Offset = new OffsetInfo("topic1", 4, 104, Instant.now().toEpochMilli()); - OffsetInfo record5Offset = new OffsetInfo("topic1", 5, 104, Instant.now().toEpochMilli()); - OffsetInfo record6Offset = new OffsetInfo("topic1", 6, 104, Instant.now().toEpochMilli()); - Message message1 = util.withOffsetInfo(record1Offset).createConsumerRecord("order-1", "order-url-1", "order-details-1"); - Message message2 = util.withOffsetInfo(record2Offset).createConsumerRecord("order-2", "order-url-2", "order-details-2"); - Message message3 = util.withOffsetInfo(record3Offset).createConsumerRecord("order-3", "order-url-3", "order-details-3"); - Message message4 = util.withOffsetInfo(record4Offset).createConsumerRecord("order-4", "order-url-4", "order-details-4"); - Message message5 = util.withOffsetInfo(record5Offset).createConsumerRecord("order-5", "order-url-5", "order-details-5"); - Message message6 = util.withOffsetInfo(record6Offset).createConsumerRecord("order-6", "order-url-6", "order-details-6"); - List messages = Collections.list(message1, message2, message3, message4, message5, message6); - sink.prepare(messages); - Record record1 = new Record(message1, new HashMap<>()); - Record record2 = new Record(message2, new HashMap<>()); - Record record3 = new Record(message3, new HashMap<>()); - Record record4 = new Record(message4, new HashMap<>()); - Record record5 = new Record(message5, new HashMap<>()); - Record record6 = new Record(message6, new HashMap<>()); - Records records = new Records(Collections.list(record1, record3, record5, record6), Collections.list(record2, record4)); - - InsertAllRequest.Builder builder = InsertAllRequest.newBuilder(client.getTableID()); - records.getValidRecords().forEach((Record m) -> builder.addRow(rowCreator.of(m))); - InsertAllRequest rows = builder.build(); - Mockito.when(converter.convert(Mockito.eq(messages), Mockito.any(Instant.class))).thenReturn(records); - Mockito.when(client.insertAll(rows)).thenReturn(response); - Mockito.when(response.hasErrors()).thenReturn(true); - - BigQueryError error1 = new BigQueryError("", "US", ""); - BigQueryError error3 = new BigQueryError("invalid", "", "The destination table's partition tmp$20160101 is outside the allowed bounds. 
You can only stream to partitions within 1825 days in the past and 366 days in the future relative to the current date");
-
-        Map<Long, List<BigQueryError>> insertErrorsMap = new HashMap<Long, List<BigQueryError>>() {{
-            put(0L, Collections.list(error1));
-            put(2L, Collections.list(error3));
-        }};
-        Mockito.when(response.getInsertErrors()).thenReturn(insertErrorsMap);
-
-        List<Message> invalidMessages = sink.execute();
-        Mockito.verify(client, Mockito.times(1)).insertAll(rows);
-
-        Assert.assertEquals(4, invalidMessages.size());
-        Assert.assertEquals(TestMessageBQ.newBuilder()
-                .setOrderNumber("order-2")
-                .setOrderUrl("order-url-2")
-                .setOrderDetails("order-details-2")
-                .build(), TestMessageBQ.parseFrom(invalidMessages.get(0).getLogMessage()));
-        Assert.assertEquals(TestMessageBQ.newBuilder()
-                .setOrderNumber("order-4")
-                .setOrderUrl("order-url-4")
-                .setOrderDetails("order-details-4")
-                .build(), TestMessageBQ.parseFrom(invalidMessages.get(1).getLogMessage()));
-        Assert.assertEquals(TestMessageBQ.newBuilder()
-                .setOrderNumber("order-1")
-                .setOrderUrl("order-url-1")
-                .setOrderDetails("order-details-1")
-                .build(), TestMessageBQ.parseFrom(invalidMessages.get(2).getLogMessage()));
-        Assert.assertEquals(TestMessageBQ.newBuilder()
-                .setOrderNumber("order-5")
-                .setOrderUrl("order-url-5")
-                .setOrderDetails("order-details-5")
-                .build(), TestMessageBQ.parseFrom(invalidMessages.get(3).getLogMessage()));
-        Assert.assertNull(invalidMessages.get(0).getErrorInfo());
-        Assert.assertNull(invalidMessages.get(1).getErrorInfo());
-        Assert.assertEquals(new ErrorInfo(new BigQuerySinkException(), ErrorType.SINK_UNKNOWN_ERROR), invalidMessages.get(2).getErrorInfo());
-        Assert.assertEquals(new ErrorInfo(new BigQuerySinkException(), ErrorType.SINK_4XX_ERROR), invalidMessages.get(3).getErrorInfo());
-    }
-}
diff --git a/src/test/java/io/odpf/firehose/sink/bigquery/BigquerySinkUtilsTest.java b/src/test/java/io/odpf/firehose/sink/bigquery/BigquerySinkUtilsTest.java
new file mode 100644
index 000000000..186af6888
--- /dev/null
+++ b/src/test/java/io/odpf/firehose/sink/bigquery/BigquerySinkUtilsTest.java
@@ -0,0 +1,31 @@
+package io.odpf.firehose.sink.bigquery;
+
+import org.junit.Assert;
+import org.junit.Test;
+
+import java.util.HashMap;
+import java.util.Map;
+import java.util.function.Function;
+
+public class BigquerySinkUtilsTest {
+    @Test
+    public void shouldGetRowIdCreator() {
+        Function<Map<String, Object>, String> rowIDCreator = BigquerySinkUtils.getRowIDCreator();
+        String rowId = rowIDCreator.apply(new HashMap<String, Object>() {{
+            put("message_topic", "test");
+            put("message_partition", 10);
+            put("message_offset", 2);
+            put("something_else", false);
+        }});
+        Assert.assertEquals("test_10_2", rowId);
+    }
+
+    @Test
+    public void shouldAddMetadataColumns() {
+        Map<String, String> config = new HashMap<String, String>() {{
+            put("test", "test");
+        }};
+        BigquerySinkUtils.addMetadataColumns(config);
+        Assert.assertEquals(config.get("SINK_BIGQUERY_METADATA_COLUMNS_TYPES"), "message_offset=integer,message_topic=string,load_time=timestamp,message_timestamp=timestamp,message_partition=integer");
+    }
+}
diff --git a/src/test/java/io/odpf/firehose/sink/bigquery/MessageUtils.java b/src/test/java/io/odpf/firehose/sink/bigquery/MessageUtils.java
deleted file mode 100644
index ec7809f71..000000000
--- a/src/test/java/io/odpf/firehose/sink/bigquery/MessageUtils.java
+++ /dev/null
@@ -1,89 +0,0 @@
-package io.odpf.firehose.sink.bigquery;
-
-import com.google.api.client.util.DateTime;
-import io.odpf.firehose.TestKeyBQ;
-import io.odpf.firehose.TestMessageBQ;
-import io.odpf.firehose.message.Message;
-
-import java.sql.Date;
-import
java.time.Instant; -import java.util.HashMap; -import java.util.Map; - -import static org.junit.Assert.assertEquals; - -public class MessageUtils { - private long timestamp; - private String topic; - private int partition; - private long offset; - - public MessageUtils() { - this.topic = "default-topic"; - this.partition = 1; - this.offset = 1; - this.timestamp = Instant.now().toEpochMilli(); - } - - public MessageUtils withOffset(int value) { - offset = value; - return this; - } - - public MessageUtils withPartition(int value) { - partition = value; - return this; - } - - public MessageUtils withTopic(String value) { - topic = value; - return this; - } - - public MessageUtils withOffsetInfo(OffsetInfo offsetInfo) { - this.topic = offsetInfo.getTopic(); - this.partition = offsetInfo.getPartition(); - this.offset = offsetInfo.getOffset(); - this.timestamp = offsetInfo.getTimestamp(); - return this; - } - - public Message createConsumerRecord(String orderNumber, String orderUrl, String orderDetails) { - TestKeyBQ key = TestKeyBQ.newBuilder() - .setOrderNumber(orderNumber) - .setOrderUrl(orderUrl) - .build(); - TestMessageBQ message = TestMessageBQ.newBuilder() - .setOrderNumber(orderNumber) - .setOrderUrl(orderUrl) - .setOrderDetails(orderDetails) - .build(); - return new Message(key.toByteArray(), message.toByteArray(), topic, partition, offset, null, timestamp, 0); - } - - public Message createEmptyValueConsumerRecord(String orderNumber, String orderUrl) { - TestKeyBQ key = TestKeyBQ.newBuilder() - .setOrderNumber(orderNumber) - .setOrderUrl(orderUrl) - .build(); - return new Message(key.toByteArray(), null, topic, partition, offset); - } - - public Map metadataColumns(OffsetInfo offsetInfo, Instant now) { - Map metadataColumns = new HashMap<>(); - metadataColumns.put("message_partition", offsetInfo.getPartition()); - metadataColumns.put("message_offset", offsetInfo.getOffset()); - metadataColumns.put("message_topic", offsetInfo.getTopic()); - metadataColumns.put("message_timestamp", new DateTime(offsetInfo.getTimestamp())); - metadataColumns.put("load_time", new DateTime(Date.from(now))); - return metadataColumns; - } - - public void assertMetadata(Map recordColumns, OffsetInfo offsetInfo, long nowEpochMillis) { - assertEquals("partition metadata mismatch", recordColumns.get("message_partition"), offsetInfo.getPartition()); - assertEquals("offset metadata mismatch", recordColumns.get("message_offset"), offsetInfo.getOffset()); - assertEquals("topic metadata mismatch", recordColumns.get("message_topic"), offsetInfo.getTopic()); - assertEquals("message timestamp metadata mismatch", recordColumns.get("message_timestamp"), new DateTime(offsetInfo.getTimestamp())); - assertEquals("load time metadata mismatch", recordColumns.get("load_time"), new DateTime(nowEpochMillis)); - } -} diff --git a/src/test/java/io/odpf/firehose/sink/bigquery/OffsetInfo.java b/src/test/java/io/odpf/firehose/sink/bigquery/OffsetInfo.java deleted file mode 100644 index b7bae311e..000000000 --- a/src/test/java/io/odpf/firehose/sink/bigquery/OffsetInfo.java +++ /dev/null @@ -1,17 +0,0 @@ -package io.odpf.firehose.sink.bigquery; - -import lombok.AllArgsConstructor; -import lombok.EqualsAndHashCode; -import lombok.Getter; -import lombok.ToString; - -@EqualsAndHashCode -@ToString -@Getter -@AllArgsConstructor -public class OffsetInfo { - private final String topic; - private final int partition; - private final long offset; - private final long timestamp; -} diff --git 
a/src/test/java/io/odpf/firehose/sink/bigquery/converter/MessageRecordConverterTest.java b/src/test/java/io/odpf/firehose/sink/bigquery/converter/MessageRecordConverterTest.java deleted file mode 100644 index 91ab70361..000000000 --- a/src/test/java/io/odpf/firehose/sink/bigquery/converter/MessageRecordConverterTest.java +++ /dev/null @@ -1,283 +0,0 @@ -package io.odpf.firehose.sink.bigquery.converter; - -import com.google.protobuf.DynamicMessage; -import com.google.protobuf.InvalidProtocolBufferException; -import com.google.protobuf.UnknownFieldSet; -import io.odpf.firehose.TestMessageBQ; -import io.odpf.firehose.config.BigQuerySinkConfig; -import io.odpf.firehose.message.Message; -import io.odpf.firehose.error.ErrorInfo; -import io.odpf.firehose.error.ErrorType; -import io.odpf.firehose.sink.bigquery.MessageUtils; -import io.odpf.firehose.sink.bigquery.OffsetInfo; -import io.odpf.firehose.sink.bigquery.models.Records; -import io.odpf.firehose.exception.UnknownFieldsException; -import io.odpf.stencil.StencilClientFactory; -import io.odpf.stencil.Parser; - -import org.aeonbits.owner.ConfigFactory; -import org.junit.Before; -import org.junit.Test; -import org.junit.runner.RunWith; -import org.mockito.runners.MockitoJUnitRunner; - -import java.time.Instant; -import java.util.Arrays; -import java.util.Collections; -import java.util.HashMap; -import java.util.List; -import java.util.Map; -import java.util.Properties; - -import static org.junit.Assert.assertEquals; -import static org.mockito.Mockito.mock; -import static org.mockito.Mockito.when; - -@RunWith(MockitoJUnitRunner.class) -public class MessageRecordConverterTest { - private final MessageUtils util = new MessageUtils(); - private MessageRecordConverter recordConverter; - private RowMapper rowMapper; - private Parser parser; - private Instant now; - - @Before - public void setUp() { - parser = StencilClientFactory.getClient().getParser(TestMessageBQ.class.getName()); - Properties columnMapping = new Properties(); - columnMapping.put(1, "bq_order_number"); - columnMapping.put(2, "bq_order_url"); - columnMapping.put(3, "bq_order_details"); - rowMapper = new RowMapper(columnMapping); - - recordConverter = new MessageRecordConverter(rowMapper, parser, - ConfigFactory.create(BigQuerySinkConfig.class, System.getProperties())); - - now = Instant.now(); - } - - @Test - public void shouldGetRecordForBQFromConsumerRecords() { - OffsetInfo record1Offset = new OffsetInfo("topic1", 1, 101, Instant.now().toEpochMilli()); - OffsetInfo record2Offset = new OffsetInfo("topic1", 2, 102, Instant.now().toEpochMilli()); - Message record1 = util.withOffsetInfo(record1Offset).createConsumerRecord("order-1", "order-url-1", "order-details-1"); - Message record2 = util.withOffsetInfo(record2Offset).createConsumerRecord("order-2", "order-url-2", "order-details-2"); - - - Map record1ExpectedColumns = new HashMap<>(); - record1ExpectedColumns.put("bq_order_number", "order-1"); - record1ExpectedColumns.put("bq_order_url", "order-url-1"); - record1ExpectedColumns.put("bq_order_details", "order-details-1"); - record1ExpectedColumns.putAll(util.metadataColumns(record1Offset, now)); - - - Map record2ExpectedColumns = new HashMap<>(); - record2ExpectedColumns.put("bq_order_number", "order-2"); - record2ExpectedColumns.put("bq_order_url", "order-url-2"); - record2ExpectedColumns.put("bq_order_details", "order-details-2"); - record2ExpectedColumns.putAll(util.metadataColumns(record2Offset, now)); - List messages = Arrays.asList(record1, record2); - - Records records = 
recordConverter.convert(messages, now); - - assertEquals(messages.size(), records.getValidRecords().size()); - Map record1Columns = records.getValidRecords().get(0).getColumns(); - Map record2Columns = records.getValidRecords().get(1).getColumns(); - assertEquals(record1ExpectedColumns.size(), record1Columns.size()); - assertEquals(record2ExpectedColumns.size(), record2Columns.size()); - assertEquals(record1ExpectedColumns, record1Columns); - assertEquals(record2ExpectedColumns, record2Columns); - } - - @Test - public void shouldIgnoreNullRecords() { - OffsetInfo record1Offset = new OffsetInfo("topic1", 1, 101, Instant.now().toEpochMilli()); - OffsetInfo record2Offset = new OffsetInfo("topic1", 2, 102, Instant.now().toEpochMilli()); - Message record1 = util.withOffsetInfo(record1Offset).createConsumerRecord("order-1", "order-url-1", "order-details-1"); - Message record2 = util.withOffsetInfo(record2Offset).createEmptyValueConsumerRecord("order-2", "order-url-2"); - - - Map record1ExpectedColumns = new HashMap<>(); - record1ExpectedColumns.put("bq_order_number", "order-1"); - record1ExpectedColumns.put("bq_order_url", "order-url-1"); - record1ExpectedColumns.put("bq_order_details", "order-details-1"); - record1ExpectedColumns.putAll(util.metadataColumns(record1Offset, now)); - - List messages = Arrays.asList(record1, record2); - Records records = recordConverter.convert(messages, now); - - assertEquals(1, records.getValidRecords().size()); - Map record1Columns = records.getValidRecords().get(0).getColumns(); - assertEquals(record1ExpectedColumns.size(), record1Columns.size()); - assertEquals(record1ExpectedColumns, record1Columns); - } - - public void shouldReturnInvalidRecordsWhenGivenNullRecords() { - OffsetInfo record1Offset = new OffsetInfo("topic1", 1, 101, Instant.now().toEpochMilli()); - OffsetInfo record2Offset = new OffsetInfo("topic1", 2, 102, Instant.now().toEpochMilli()); - Message record1 = util.withOffsetInfo(record1Offset).createConsumerRecord("order-1", "order-url-1", "order-details-1"); - Message record2 = util.withOffsetInfo(record2Offset).createEmptyValueConsumerRecord("order-2", "order-url-2"); - - Map record1ExpectedColumns = new HashMap<>(); - record1ExpectedColumns.put("bq_order_number", "order-1"); - record1ExpectedColumns.put("bq_order_url", "order-url-1"); - record1ExpectedColumns.put("bq_order_details", "order-details-1"); - record1ExpectedColumns.putAll(util.metadataColumns(record1Offset, now)); - - List messages = Arrays.asList(record1, record2); - Records records = recordConverter.convert(messages, now); - - assertEquals(1, records.getValidRecords().size()); - Map record1Columns = records.getValidRecords().get(0).getColumns(); - assertEquals(record1ExpectedColumns.size(), record1Columns.size()); - assertEquals(record1ExpectedColumns, record1Columns); - } - - @Test - public void shouldNotNamespaceMetadataFieldWhenNamespaceIsNotProvided() { - BigQuerySinkConfig sinkConfig = ConfigFactory.create(BigQuerySinkConfig.class, System.getProperties()); - MessageRecordConverter recordConverterTest = new MessageRecordConverter(rowMapper, parser, sinkConfig); - - OffsetInfo record1Offset = new OffsetInfo("topic1", 1, 101, Instant.now().toEpochMilli()); - Message record1 = util.withOffsetInfo(record1Offset).createConsumerRecord("order-1", "order-url-1", "order-details-1"); - - Map record1ExpectedColumns = new HashMap<>(); - record1ExpectedColumns.put("bq_order_number", "order-1"); - record1ExpectedColumns.put("bq_order_url", "order-url-1"); - 
record1ExpectedColumns.put("bq_order_details", "order-details-1"); - record1ExpectedColumns.putAll(util.metadataColumns(record1Offset, now)); - - List messages = Collections.singletonList(record1); - Records records = recordConverterTest.convert(messages, now); - - assertEquals(messages.size(), records.getValidRecords().size()); - Map record1Columns = records.getValidRecords().get(0).getColumns(); - assertEquals(record1ExpectedColumns.size(), record1Columns.size()); - assertEquals(record1ExpectedColumns, record1Columns); - assertEquals(sinkConfig.getBqMetadataNamespace(), ""); - } - - @Test - public void shouldNamespaceMetadataFieldWhenNamespaceIsProvided() { - System.setProperty("SINK_BIGQUERY_METADATA_NAMESPACE", "metadata_ns"); - BigQuerySinkConfig sinkConfig = ConfigFactory.create(BigQuerySinkConfig.class, System.getProperties()); - MessageRecordConverter recordConverterTest = new MessageRecordConverter(rowMapper, parser, sinkConfig); - - OffsetInfo record1Offset = new OffsetInfo("topic1", 1, 101, Instant.now().toEpochMilli()); - Message record1 = util.withOffsetInfo(record1Offset).createConsumerRecord("order-1", "order-url-1", "order-details-1"); - - Map record1ExpectedColumns = new HashMap<>(); - record1ExpectedColumns.put("bq_order_number", "order-1"); - record1ExpectedColumns.put("bq_order_url", "order-url-1"); - record1ExpectedColumns.put("bq_order_details", "order-details-1"); - record1ExpectedColumns.put(sinkConfig.getBqMetadataNamespace(), util.metadataColumns(record1Offset, now)); - - List messages = Collections.singletonList(record1); - Records records = recordConverterTest.convert(messages, now); - - assertEquals(messages.size(), records.getValidRecords().size()); - Map record1Columns = records.getValidRecords().get(0).getColumns(); - assertEquals(record1ExpectedColumns.size(), record1Columns.size()); - assertEquals(record1ExpectedColumns, record1Columns); - System.setProperty("SINK_BIGQUERY_METADATA_NAMESPACE", ""); - } - - - public void shouldReturnInvalidRecordsGivenInvalidProtobufMessage() { - OffsetInfo record1Offset = new OffsetInfo("topic1", 1, 101, Instant.now().toEpochMilli()); - OffsetInfo record2Offset = new OffsetInfo("topic1", 2, 102, Instant.now().toEpochMilli()); - Message record1 = util.withOffsetInfo(record1Offset).createConsumerRecord("order-1", - "order-url-1", "order-details-1"); - Message record2 = new Message("invalid-key".getBytes(), "invalid-value".getBytes(), - record2Offset.getTopic(), record2Offset.getPartition(), - record2Offset.getOffset(), null, record2Offset.getTimestamp(), 0); - - Map record1ExpectedColumns = new HashMap<>(); - record1ExpectedColumns.put("bq_order_number", "order-1"); - record1ExpectedColumns.put("bq_order_url", "order-url-1"); - record1ExpectedColumns.put("bq_order_details", "order-details-1"); - record1ExpectedColumns.putAll(util.metadataColumns(record1Offset, now)); - - List messages = Arrays.asList(record1, record2); - Records records = recordConverter.convert(messages, now); - assertEquals(1, records.getInvalidRecords()); - } - - @Test - public void shouldWriteToErrorWriterInvalidRecords() { - OffsetInfo record1Offset = new OffsetInfo("topic1", 1, 101, Instant.now().toEpochMilli()); - OffsetInfo record2Offset = new OffsetInfo("topic1", 2, 102, Instant.now().toEpochMilli()); - Message record1 = util.withOffsetInfo(record1Offset).createConsumerRecord("order-1", - "order-url-1", "order-details-1"); - - Message record2 = new Message("invalid-key".getBytes(), "invalid-value".getBytes(), - record2Offset.getTopic(), 
record2Offset.getPartition(), - record2Offset.getOffset(), null, record2Offset.getTimestamp(), 0); - - Map record1ExpectedColumns = new HashMap<>(); - record1ExpectedColumns.put("bq_order_number", "order-1"); - record1ExpectedColumns.put("bq_order_url", "order-url-1"); - record1ExpectedColumns.put("bq_order_details", "order-details-1"); - record1ExpectedColumns.putAll(util.metadataColumns(record1Offset, now)); - - List messages = Arrays.asList(record1, record2); - Records records = recordConverter.convert(messages, now); - - assertEquals(1, records.getValidRecords().size()); - assertEquals(1, records.getInvalidRecords().size()); - Map record1Columns = records.getValidRecords().get(0).getColumns(); - assertEquals(record1ExpectedColumns.size(), record1Columns.size()); - assertEquals(record1ExpectedColumns, record1Columns); - } - - @Test - public void shouldReturnInvalidRecordsWhenUnknownFieldsFound() throws InvalidProtocolBufferException { - System.setProperty("INPUT_SCHEMA_PROTO_ALLOW_UNKNOWN_FIELDS_ENABLE", "false"); - Parser mockParser = mock(Parser.class); - - OffsetInfo record1Offset = new OffsetInfo("topic1", 1, 101, Instant.now().toEpochMilli()); - Message consumerRecord = util.withOffsetInfo(record1Offset).createConsumerRecord("order-1", - "order-url-1", "order-details-1"); - - DynamicMessage dynamicMessage = DynamicMessage.newBuilder(TestMessageBQ.getDescriptor()) - .setUnknownFields(UnknownFieldSet.newBuilder() - .addField(1, UnknownFieldSet.Field.getDefaultInstance()) - .build()) - .build(); - when(mockParser.parse(consumerRecord.getLogMessage())).thenReturn(dynamicMessage); - - recordConverter = new MessageRecordConverter(rowMapper, mockParser, - ConfigFactory.create(BigQuerySinkConfig.class, System.getProperties())); - - List messages = Collections.singletonList(consumerRecord); - Records records = recordConverter.convert(messages, now); - consumerRecord.setErrorInfo(new ErrorInfo(new UnknownFieldsException(dynamicMessage), ErrorType.UNKNOWN_FIELDS_ERROR)); - assertEquals(consumerRecord, records.getInvalidRecords().get(0).getMessage()); - } - - @Test - public void shouldIgnoreUnknownFieldsIfTheConfigIsSet() throws InvalidProtocolBufferException { - System.setProperty("INPUT_SCHEMA_PROTO_ALLOW_UNKNOWN_FIELDS_ENABLE", "true"); - Parser mockParser = mock(Parser.class); - - OffsetInfo record1Offset = new OffsetInfo("topic1", 1, 101, Instant.now().toEpochMilli()); - Message consumerRecord = util.withOffsetInfo(record1Offset).createConsumerRecord("order-1", - "order-url-1", "order-details-1"); - - DynamicMessage dynamicMessage = DynamicMessage.newBuilder(TestMessageBQ.getDescriptor()) - .setUnknownFields(UnknownFieldSet.newBuilder() - .addField(1, UnknownFieldSet.Field.getDefaultInstance()) - .build()) - .build(); - when(mockParser.parse(consumerRecord.getLogMessage())).thenReturn(dynamicMessage); - - recordConverter = new MessageRecordConverter(rowMapper, mockParser, - ConfigFactory.create(BigQuerySinkConfig.class, System.getProperties())); - - List messages = Collections.singletonList(consumerRecord); - Records records = recordConverter.convert(messages, now); - assertEquals(1, records.getValidRecords().size()); - assertEquals(0, records.getInvalidRecords().size()); - assertEquals(consumerRecord, records.getValidRecords().get(0).getMessage()); - } -} diff --git a/src/test/java/io/odpf/firehose/sink/bigquery/converter/RowMapperTest.java b/src/test/java/io/odpf/firehose/sink/bigquery/converter/RowMapperTest.java deleted file mode 100644 index b09306954..000000000 --- 
a/src/test/java/io/odpf/firehose/sink/bigquery/converter/RowMapperTest.java +++ /dev/null @@ -1,385 +0,0 @@ -package io.odpf.firehose.sink.bigquery.converter; - -import com.google.api.client.util.DateTime; -import com.google.protobuf.Descriptors; -import com.google.protobuf.DynamicMessage; -import com.google.protobuf.InvalidProtocolBufferException; -import com.google.protobuf.ListValue; -import com.google.protobuf.Struct; -import com.google.protobuf.Timestamp; -import com.google.protobuf.Value; -import io.odpf.firehose.StatusBQ; -import io.odpf.firehose.TestMessageBQ; -import io.odpf.firehose.TestNestedMessageBQ; -import io.odpf.firehose.TestNestedRepeatedMessageBQ; -import io.odpf.firehose.sink.bigquery.proto.ProtoUtil; -import io.odpf.stencil.DescriptorMapBuilder; -import io.odpf.stencil.StencilClientFactory; -import io.odpf.stencil.client.StencilClient; -import io.odpf.stencil.Parser; -import org.junit.Before; -import org.junit.Test; -import org.junit.runner.RunWith; -import org.mockito.runners.MockitoJUnitRunner; - -import java.io.FileInputStream; -import java.io.IOException; -import java.io.InputStream; -import java.time.Instant; -import java.util.Arrays; -import java.util.List; -import java.util.Map; -import java.util.Properties; - -import static org.junit.Assert.*; -import static org.mockito.Mockito.mock; -import static org.mockito.Mockito.when; - -@RunWith(MockitoJUnitRunner.class) -public class RowMapperTest { - - private Timestamp createdAt; - private DynamicMessage dynamicMessage; - private Instant now; - private long nowMillis; - private StencilClient stencilClientWithURL; - - @Before - public void setUp() throws IOException, Descriptors.DescriptorValidationException { - Parser protoParser = StencilClientFactory.getClient().getParser(TestMessageBQ.class.getName()); - now = Instant.now(); - createdAt = Timestamp.newBuilder().setSeconds(now.getEpochSecond()).setNanos(now.getNano()).build(); - TestMessageBQ testMessage = TestMessageBQ.newBuilder() - .setOrderNumber("order-1") - .setOrderUrl("order-url") - .setOrderDetails("order-details") - .setCreatedAt(createdAt) - .setStatus(StatusBQ.COMPLETED) - .setOrderDate(com.google.type.Date.newBuilder().setYear(1996).setMonth(11).setDay(21)) - .build(); - - dynamicMessage = protoParser.parse(testMessage.toByteArray()); - nowMillis = Instant.ofEpochSecond(now.getEpochSecond(), now.getNano()).toEpochMilli(); - - ClassLoader classLoader = getClass().getClassLoader(); - InputStream fileInputStream = new FileInputStream(classLoader.getResource("__files/descriptors.bin").getFile()); - Map descriptorMap = DescriptorMapBuilder.buildFrom(fileInputStream); - stencilClientWithURL = mock(StencilClient.class); - when(stencilClientWithURL.get("io.odpf.firehose.TestMessageChildBQ")).thenReturn(descriptorMap.get("io.odpf.firehose.TestMessageChildBQ")); - } - - @Test - public void shouldReturnFieldsInProperties() { - Properties fieldMappings = new Properties(); - fieldMappings.put("1", "order_number_field"); - fieldMappings.put("2", "order_url_field"); - fieldMappings.put("3", "order_details_field"); - fieldMappings.put("4", "created_at"); - fieldMappings.put("5", "order_status"); - fieldMappings.put("14", getDateProperties()); - - Map fields = new RowMapper(fieldMappings).map(dynamicMessage); - - assertEquals("order-1", fields.get("order_number_field")); - assertEquals("order-url", fields.get("order_url_field")); - assertEquals("order-details", fields.get("order_details_field")); - assertEquals(new DateTime(nowMillis), fields.get("created_at")); - 
assertEquals("COMPLETED", fields.get("order_status")); - Map dateFields = (Map) fields.get("order_date_field"); - assertEquals(1996, dateFields.get("year")); - assertEquals(11, dateFields.get("month")); - assertEquals(21, dateFields.get("day")); - assertEquals(fieldMappings.size(), fields.size()); - } - - @Test - public void shouldParseDurationMessageSuccessfully() throws InvalidProtocolBufferException { - Properties fieldMappings = new Properties(); - Properties durationMappings = new Properties(); - durationMappings.put("record_name", "duration"); - durationMappings.put("1", "seconds"); - durationMappings.put("2", "nanos"); - fieldMappings.put("1", "duration_id"); - fieldMappings.put("11", durationMappings); - - TestMessageBQ message = ProtoUtil.generateTestMessage(now); - Parser messageProtoParser = StencilClientFactory.getClient().getParser(TestMessageBQ.class.getName()); - Map fields = new RowMapper(fieldMappings).map(messageProtoParser.parse(message.toByteArray())); - Map durationFields = (Map) fields.get("duration"); - assertEquals("order-1", fields.get("duration_id")); - assertEquals((long) 1, durationFields.get("seconds")); - assertEquals(1000000000, durationFields.get("nanos")); - } - - @Test - public void shouldParseNestedMessageSuccessfully() { - Properties fieldMappings = new Properties(); - Properties nestedMappings = getTestMessageProperties(); - fieldMappings.put("1", "nested_id"); - fieldMappings.put("2", nestedMappings); - - TestMessageBQ message1 = ProtoUtil.generateTestMessage(now); - TestMessageBQ message2 = ProtoUtil.generateTestMessage(now); - - Parser protoParser = StencilClientFactory.getClient().getParser(TestNestedMessageBQ.class.getName()); - TestNestedMessageBQ nestedMessage1 = ProtoUtil.generateTestNestedMessage("nested-message-1", message1); - TestNestedMessageBQ nestedMessage2 = ProtoUtil.generateTestNestedMessage("nested-message-2", message2); - Arrays.asList(nestedMessage1, nestedMessage2).forEach(msg -> { - Map fields = null; - try { - fields = new RowMapper(fieldMappings).map(protoParser.parse(msg.toByteArray())); - } catch (InvalidProtocolBufferException e) { - e.printStackTrace(); - } - assertNestedMessage(msg, fields); - }); - } - - @Test - public void shouldParseRepeatedPrimitives() throws InvalidProtocolBufferException { - Properties fieldMappings = new Properties(); - fieldMappings.put("1", "order_number"); - fieldMappings.put("12", "aliases"); - - String orderNumber = "order-1"; - TestMessageBQ message = TestMessageBQ.newBuilder() - .setOrderNumber(orderNumber) - .setOrderUrl("order-url-1") - .addAliases("alias1").addAliases("alias2") - .build(); - - Parser protoParser = StencilClientFactory.getClient().getParser(TestMessageBQ.class.getName()); - Map fields = new RowMapper(fieldMappings).map(protoParser.parse(message.toByteArray())); - - assertEquals(orderNumber, fields.get("order_number")); - assertEquals(Arrays.asList("alias1", "alias2"), fields.get("aliases")); - } - - @Test - public void shouldParseRepeatedNestedMessages() throws InvalidProtocolBufferException { - int number = 1234; - TestMessageBQ nested1 = ProtoUtil.generateTestMessage(now); - TestMessageBQ nested2 = ProtoUtil.generateTestMessage(now); - TestNestedRepeatedMessageBQ message = TestNestedRepeatedMessageBQ.newBuilder() - .setNumberField(number) - .addRepeatedMessage(nested1) - .addRepeatedMessage(nested2) - .build(); - - - Properties fieldMappings = new Properties(); - fieldMappings.put("3", "number_field"); - fieldMappings.put("2", getTestMessageProperties()); - - - Parser 
protoParser = StencilClientFactory.getClient().getParser(TestNestedRepeatedMessageBQ.class.getName()); - Map fields = new RowMapper(fieldMappings).map(protoParser.parse(message.toByteArray())); - - assertEquals(number, fields.get("number_field")); - List repeatedMessagesMap = (List) fields.get("msg"); - assertTestMessageFields((Map) repeatedMessagesMap.get(0), nested1); - assertTestMessageFields((Map) repeatedMessagesMap.get(1), nested2); - } - - @Test - public void shouldParseRepeatedNestedMessagesIfRepeatedFieldsAreMissing() throws InvalidProtocolBufferException { - int number = 1234; - TestMessageBQ nested1 = ProtoUtil.generateTestMessage(now); - TestMessageBQ nested2 = ProtoUtil.generateTestMessage(now); - TestNestedRepeatedMessageBQ message = TestNestedRepeatedMessageBQ.newBuilder() - .setNumberField(number) - .build(); - - - Properties fieldMappings = new Properties(); - fieldMappings.put("3", "number_field"); - fieldMappings.put("2", getTestMessageProperties()); - - Parser protoParser = StencilClientFactory.getClient().getParser(TestNestedRepeatedMessageBQ.class.getName()); - Map fields = new RowMapper(fieldMappings).map(protoParser.parse(message.toByteArray())); - - assertEquals(number, fields.get("number_field")); - assertEquals(1, fields.size()); - } - - @Test - public void shouldParseMapFields() throws InvalidProtocolBufferException { - TestMessageBQ message = TestMessageBQ.newBuilder() - .setOrderNumber("order-1") - .setOrderUrl("order-url-1") - .setOrderDetails("order-details-1") - .putCurrentState("state_key_1", "state_value_1") - .putCurrentState("state_key_2", "state_value_2") - .build(); - - Properties fieldMappings = new Properties(); - fieldMappings.put("1", "order_number_field"); - fieldMappings.put("2", "order_url_field"); - Properties currStateMapping = new Properties(); - currStateMapping.put("record_name", "current_state"); - currStateMapping.put("1", "key"); - currStateMapping.put("2", "value"); - fieldMappings.put("9", currStateMapping); - - Parser protoParser = StencilClientFactory.getClient().getParser(TestMessageBQ.class.getName()); - Map fields = new RowMapper(fieldMappings).map(protoParser.parse(message.toByteArray())); - - assertEquals(message.getOrderNumber(), fields.get("order_number_field")); - assertEquals(message.getOrderUrl(), fields.get("order_url_field")); - List repeatedStateMap = (List) fields.get("current_state"); - assertEquals("state_key_1", ((Map) repeatedStateMap.get(0)).get("key")); - assertEquals("state_value_1", ((Map) repeatedStateMap.get(0)).get("value")); - assertEquals("state_key_2", ((Map) repeatedStateMap.get(1)).get("key")); - assertEquals("state_value_2", ((Map) repeatedStateMap.get(1)).get("value")); - } - - @Test - public void shouldMapStructFields() throws InvalidProtocolBufferException { - ListValue.Builder builder = ListValue.newBuilder(); - ListValue listValue = builder - .addValues(Value.newBuilder().setNumberValue(1).build()) - .addValues(Value.newBuilder().setNumberValue(2).build()) - .addValues(Value.newBuilder().setNumberValue(3).build()) - .build(); - Struct value = Struct.newBuilder() - .putFields("number", Value.newBuilder().setNumberValue(123.45).build()) - .putFields("string", Value.newBuilder().setStringValue("string_val").build()) - .putFields("list", Value.newBuilder().setListValue(listValue).build()) - .putFields("boolean", Value.newBuilder().setBoolValue(true).build()) - .build(); - - TestMessageBQ message = TestMessageBQ.newBuilder() - .setOrderNumber("order-1") - .setProperties(value) - .build(); - - 
Properties fieldMappings = new Properties(); - fieldMappings.put("1", "order_number_field"); - fieldMappings.put("13", "properties"); - - Parser protoParser = StencilClientFactory.getClient().getParser(TestMessageBQ.class.getName()); - Map fields = new RowMapper(fieldMappings).map(protoParser.parse(message.toByteArray())); - - assertEquals(message.getOrderNumber(), fields.get("order_number_field")); - String expectedProperties = "{\"number\":123.45,\"string\":\"string_val\",\"list\":[1.0,2.0,3.0],\"boolean\":true}"; - assertEquals(expectedProperties, fields.get("properties")); - } - - private void assertNestedMessage(TestNestedMessageBQ msg, Map fields) { - assertEquals(msg.getNestedId(), fields.get("nested_id")); - Map nestedFields = (Map) fields.get("msg"); - assertNotNull(nestedFields); - TestMessageBQ message = msg.getSingleMessage(); - assertTestMessageFields(nestedFields, message); - } - - private void assertTestMessageFields(Map nestedFields, TestMessageBQ message) { - assertEquals(message.getOrderNumber(), nestedFields.get("order_number_field")); - assertEquals(message.getOrderUrl(), nestedFields.get("order_url_field")); - assertEquals(message.getOrderDetails(), nestedFields.get("order_details_field")); - assertEquals(new DateTime(nowMillis), nestedFields.get("created_at_field")); - assertEquals(message.getStatus().toString(), nestedFields.get("status_field")); - } - - private Properties getTestMessageProperties() { - Properties nestedMappings = new Properties(); - nestedMappings.put("record_name", "msg"); - nestedMappings.put("1", "order_number_field"); - nestedMappings.put("2", "order_url_field"); - nestedMappings.put("3", "order_details_field"); - nestedMappings.put("4", "created_at_field"); - nestedMappings.put("5", "status_field"); - return nestedMappings; - } - - private Properties getDateProperties() { - Properties nestedMappings = new Properties(); - nestedMappings.put("record_name", "order_date_field"); - nestedMappings.put("1", "year"); - nestedMappings.put("2", "month"); - nestedMappings.put("3", "day"); - return nestedMappings; - } - - @Test() - public void shouldReturnNullWhenIndexNotPresent() { - Properties fieldMappings = new Properties(); - fieldMappings.put("100", "some_column_in_bq"); - - Map fields = new RowMapper(fieldMappings).map(dynamicMessage); - assertNull(fields.get("some_column_in_bq")); - } - - @Test(expected = RuntimeException.class) - public void shouldThrowExceptionWhenConfigNotPresent() { - Properties fieldMappings = new Properties(); - fieldMappings.put("10", "some_column_in_bq"); - - new RowMapper(null).map(dynamicMessage); - } - - @Test - public void shouldReturnNullWhenNoDateFieldIsProvided() throws InvalidProtocolBufferException { - TestMessageBQ testMessage = TestMessageBQ.newBuilder() - .build(); - Parser protoParser = StencilClientFactory.getClient().getParser(TestMessageBQ.class.getName()); - dynamicMessage = protoParser.parse(testMessage.toByteArray()); - Properties fieldMappings = new Properties(); - fieldMappings.put("14", getDateProperties()); - - Map fields = new RowMapper(fieldMappings).map(dynamicMessage); - - assertNull(fields.get("order_date_field")); - } - - @Test - public void shouldParseRepeatedTimestamp() throws InvalidProtocolBufferException { - Properties fieldMappings = new Properties(); - fieldMappings.put("15", "updated_at"); - createdAt = Timestamp.newBuilder().setSeconds(now.getEpochSecond()).setNanos(now.getNano()).build(); - - TestMessageBQ message = TestMessageBQ.newBuilder() - 
.addUpdatedAt(createdAt).addUpdatedAt(createdAt) - .build(); - - Parser protoParser = StencilClientFactory.getClient().getParser(TestMessageBQ.class.getName()); - Map fields = new RowMapper(fieldMappings).map(protoParser.parse(message.toByteArray())); - - assertEquals(Arrays.asList(new DateTime(now.toEpochMilli()), new DateTime(now.toEpochMilli())), fields.get("updated_at")); - } - - @Test - public void shouldParseStructField() throws InvalidProtocolBufferException { - Properties fieldMappings = new Properties(); - fieldMappings.put("13", "properties"); - - TestMessageBQ message = TestMessageBQ.newBuilder() - .setProperties(Struct.newBuilder().putFields("name", Value.newBuilder().setStringValue("John").build()) - .putFields("age", Value.newBuilder().setStringValue("50").build()).build()) - .build(); - - Parser protoParser = StencilClientFactory.getClient().getParser(TestMessageBQ.class.getName()); - Map fields = new RowMapper(fieldMappings).map(protoParser.parse(message.toByteArray())); - - assertEquals("{\"name\":\"John\",\"age\":\"50\"}", fields.get("properties")); - } - - @Test - public void shouldParseRepeatableStructField() throws InvalidProtocolBufferException { - Properties fieldMappings = new Properties(); - fieldMappings.put("16", "attributes"); - Value val = Value.newBuilder().setStringValue("test").build(); - - TestMessageBQ message = TestMessageBQ.newBuilder() - .addAttributes(Struct.newBuilder().putFields("name", Value.newBuilder().setStringValue("John").build()) - .putFields("age", Value.newBuilder().setStringValue("50").build()).build()) - .addAttributes(Struct.newBuilder().putFields("name", Value.newBuilder().setStringValue("John").build()) - .putFields("age", Value.newBuilder().setStringValue("60").build()).build()) - .build(); - - Parser protoParser = StencilClientFactory.getClient().getParser(TestMessageBQ.class.getName()); - Map fields = new RowMapper(fieldMappings).map(protoParser.parse(message.toByteArray())); - - assertEquals(Arrays.asList("{\"name\":\"John\",\"age\":\"50\"}", "{\"name\":\"John\",\"age\":\"60\"}"), fields.get("attributes")); - } -} diff --git a/src/test/java/io/odpf/firehose/sink/bigquery/converter/fields/ByteProtoFieldTest.java b/src/test/java/io/odpf/firehose/sink/bigquery/converter/fields/ByteProtoFieldTest.java deleted file mode 100644 index 4a562871c..000000000 --- a/src/test/java/io/odpf/firehose/sink/bigquery/converter/fields/ByteProtoFieldTest.java +++ /dev/null @@ -1,51 +0,0 @@ -package io.odpf.firehose.sink.bigquery.converter.fields; - -import com.google.protobuf.ByteString; -import com.google.protobuf.Descriptors; -import io.odpf.firehose.consumer.TestBytesMessage; -import org.junit.Before; -import org.junit.Test; - -import java.nio.charset.StandardCharsets; -import java.util.Base64; - -import static org.junit.Assert.*; - - -public class ByteProtoFieldTest { - - private ByteProtoField byteProtoField; - private final String content = "downing street"; - - @Before - public void setUp() throws Exception { - TestBytesMessage bytesMessage = TestBytesMessage.newBuilder() - .setContent(ByteString.copyFromUtf8(content)) - .build(); - - Descriptors.FieldDescriptor fieldDescriptor = bytesMessage.getDescriptorForType().findFieldByName("content"); - byteProtoField = new ByteProtoField(fieldDescriptor, bytesMessage.getField(fieldDescriptor)); - } - - @Test - public void shouldConvertBytesToString() { - String parseResult = (String) byteProtoField.getValue(); - String encodedBytes = new 
String(Base64.getEncoder().encode(content.getBytes(StandardCharsets.UTF_8))); - assertEquals(encodedBytes, parseResult); - } - - @Test - public void shouldMatchByteProtobufField() { - assertTrue(byteProtoField.matches()); - } - - @Test - public void shouldNotMatchFieldOtherThanByteProtobufField() { - TestBytesMessage bytesMessage = TestBytesMessage.newBuilder() - .build(); - Descriptors.FieldDescriptor fieldDescriptor = bytesMessage.getDescriptorForType().findFieldByName("order_number"); - byteProtoField = new ByteProtoField(fieldDescriptor, bytesMessage.getField(fieldDescriptor)); - - assertFalse(byteProtoField.matches()); - } -} diff --git a/src/test/java/io/odpf/firehose/sink/bigquery/converter/fields/DefaultProtoFieldTest.java b/src/test/java/io/odpf/firehose/sink/bigquery/converter/fields/DefaultProtoFieldTest.java deleted file mode 100644 index 50ad4df11..000000000 --- a/src/test/java/io/odpf/firehose/sink/bigquery/converter/fields/DefaultProtoFieldTest.java +++ /dev/null @@ -1,31 +0,0 @@ -package io.odpf.firehose.sink.bigquery.converter.fields; - -import com.google.protobuf.Descriptors; -import com.google.protobuf.DynamicMessage; -import com.google.protobuf.InvalidProtocolBufferException; -import io.odpf.firehose.consumer.TestMessage; -import org.junit.Test; - -import static org.junit.Assert.*; - -public class DefaultProtoFieldTest { - - @Test - public void shouldReturnProtobufElementsAsItIs() throws InvalidProtocolBufferException { - String orderNumber = "123X"; - TestMessage testMessage = TestMessage.newBuilder().setOrderNumber(orderNumber).build(); - DynamicMessage dynamicMessage = DynamicMessage.parseFrom(testMessage.getDescriptorForType(), testMessage.toByteArray()); - Descriptors.FieldDescriptor fieldDescriptor = dynamicMessage.getDescriptorForType().findFieldByName("order_number"); - DefaultProtoField defaultProtoField = new DefaultProtoField(dynamicMessage.getField(fieldDescriptor)); - Object value = defaultProtoField.getValue(); - - assertEquals(orderNumber, value); - } - - @Test - public void shouldNotMatchAnyType() { - DefaultProtoField defaultProtoField = new DefaultProtoField(null); - boolean isMatch = defaultProtoField.matches(); - assertFalse(isMatch); - } -} diff --git a/src/test/java/io/odpf/firehose/sink/bigquery/converter/fields/EnumProtoFieldTest.java b/src/test/java/io/odpf/firehose/sink/bigquery/converter/fields/EnumProtoFieldTest.java deleted file mode 100644 index d2ce883ef..000000000 --- a/src/test/java/io/odpf/firehose/sink/bigquery/converter/fields/EnumProtoFieldTest.java +++ /dev/null @@ -1,56 +0,0 @@ -package io.odpf.firehose.sink.bigquery.converter.fields; - -import com.google.protobuf.Descriptors; -import com.google.protobuf.DynamicMessage; -import com.google.protobuf.InvalidProtocolBufferException; -import io.odpf.firehose.consumer.TestEnumMessage; -import io.odpf.firehose.consumer.TestStatus; -import org.junit.Before; -import org.junit.Test; - -import java.util.ArrayList; - -import static org.junit.Assert.assertEquals; -import static org.junit.Assert.assertTrue; - -public class EnumProtoFieldTest { - - private EnumProtoField enumProtoField; - - @Before - public void setUp() throws Exception { - TestEnumMessage testEnumMessage = TestEnumMessage.newBuilder().setLastStatus(TestStatus.Enum.CREATED).build(); - DynamicMessage dynamicMessage = DynamicMessage.parseFrom(testEnumMessage.getDescriptorForType(), testEnumMessage.toByteArray()); - Descriptors.FieldDescriptor fieldDescriptor = dynamicMessage.getDescriptorForType().findFieldByName("last_status"); - 
enumProtoField = new EnumProtoField(fieldDescriptor, dynamicMessage.getField(fieldDescriptor)); - } - - @Test - public void shouldConvertProtobufEnumToString() { - String fieldValue = (String) enumProtoField.getValue(); - assertEquals("CREATED", fieldValue); - } - - @Test - public void shouldConvertRepeatedProtobufEnumToListOfString() throws InvalidProtocolBufferException { - TestEnumMessage testEnumMessage = TestEnumMessage.newBuilder() - .addStatusHistory(TestStatus.Enum.CREATED) - .addStatusHistory(TestStatus.Enum.IN_PROGRESS) - .build(); - DynamicMessage dynamicMessage = DynamicMessage.parseFrom(testEnumMessage.getDescriptorForType(), testEnumMessage.toByteArray()); - Descriptors.FieldDescriptor fieldDescriptor = dynamicMessage.getDescriptorForType().findFieldByName("status_history"); - enumProtoField = new EnumProtoField(fieldDescriptor, dynamicMessage.getField(fieldDescriptor)); - Object fieldValue = enumProtoField.getValue(); - - ArrayList enumValueList = new ArrayList<>(); - enumValueList.add("CREATED"); - enumValueList.add("IN_PROGRESS"); - assertEquals(enumValueList, fieldValue); - } - - @Test - public void shouldMatchEnumProtobufField() { - boolean isMatch = enumProtoField.matches(); - assertTrue(isMatch); - } -} diff --git a/src/test/java/io/odpf/firehose/sink/bigquery/converter/fields/NestedProtoFieldTest.java b/src/test/java/io/odpf/firehose/sink/bigquery/converter/fields/NestedProtoFieldTest.java deleted file mode 100644 index 9ac8cd29b..000000000 --- a/src/test/java/io/odpf/firehose/sink/bigquery/converter/fields/NestedProtoFieldTest.java +++ /dev/null @@ -1,43 +0,0 @@ -package io.odpf.firehose.sink.bigquery.converter.fields; - -import com.google.protobuf.Descriptors; -import com.google.protobuf.DynamicMessage; -import io.odpf.firehose.consumer.TestMessage; -import io.odpf.firehose.consumer.TestNestedMessage; -import org.junit.Before; -import org.junit.Test; - -import static org.junit.Assert.*; - -public class NestedProtoFieldTest { - - private NestedProtoField nestedProtoField; - private TestMessage childField; - - @Before - public void setUp() throws Exception { - childField = TestMessage.newBuilder() - .setOrderNumber("123X") - .build(); - TestNestedMessage nestedMessage = TestNestedMessage.newBuilder() - .setSingleMessage(childField) - .build(); - DynamicMessage dynamicMessage = DynamicMessage.parseFrom(nestedMessage.getDescriptorForType(), nestedMessage.toByteArray()); - - Descriptors.FieldDescriptor fieldDescriptor = nestedMessage.getDescriptorForType().findFieldByName("single_message"); - nestedProtoField = new NestedProtoField(fieldDescriptor, dynamicMessage.getField(fieldDescriptor)); - - } - - @Test - public void shouldReturnDynamicMessage() { - DynamicMessage nestedChild = nestedProtoField.getValue(); - assertEquals(childField, nestedChild); - } - - @Test - public void shouldMatchDynamicMessageAsNested() { - boolean isMatch = nestedProtoField.matches(); - assertTrue(isMatch); - } -} diff --git a/src/test/java/io/odpf/firehose/sink/bigquery/converter/fields/StructProtoFieldTest.java b/src/test/java/io/odpf/firehose/sink/bigquery/converter/fields/StructProtoFieldTest.java deleted file mode 100644 index a1e14e65d..000000000 --- a/src/test/java/io/odpf/firehose/sink/bigquery/converter/fields/StructProtoFieldTest.java +++ /dev/null @@ -1,99 +0,0 @@ -package io.odpf.firehose.sink.bigquery.converter.fields; - -import com.google.protobuf.Descriptors; -import com.google.protobuf.DynamicMessage; -import com.google.protobuf.InvalidProtocolBufferException; -import 
com.google.protobuf.ListValue; -import com.google.protobuf.NullValue; -import com.google.protobuf.Struct; -import com.google.protobuf.Value; -import io.odpf.firehose.consumer.TestStructMessage; -import org.junit.Before; -import org.junit.Test; - -import java.util.ArrayList; -import java.util.List; - -import static org.junit.Assert.assertEquals; -import static org.junit.Assert.assertTrue; - -public class StructProtoFieldTest { - - private StructProtoField structProtoField; - private Struct structValue; - - @Before - public void setUp() throws Exception { - List listValues = new ArrayList<>(); - listValues.add(Value.newBuilder().setNumberValue(1).build()); - listValues.add(Value.newBuilder().setNumberValue(2).build()); - - structValue = Struct.newBuilder() - .putFields("null_value", Value.newBuilder().setNullValue(NullValue.NULL_VALUE) - .build()) - .putFields("number_value", Value.newBuilder().setNumberValue(2.0).build()) - .putFields("string_value", Value.newBuilder().setStringValue("").build()) - .putFields("bool_value", Value.newBuilder().setBoolValue(false).build()) - .putFields("struct_value", Value.newBuilder().setStructValue( - Struct.newBuilder().putFields("child_value1", Value.newBuilder().setNumberValue(1.0).build()) - .build()) - .build()) - .putFields("list_value", Value.newBuilder().setListValue(ListValue.newBuilder() - .addAllValues(listValues).build()).build()) - .build(); - TestStructMessage message = TestStructMessage.newBuilder() - .setOrderNumber("123X") - .setCustomFields(structValue) - .build(); - - DynamicMessage dynamicMessage = DynamicMessage.parseFrom(message.getDescriptorForType(), message.toByteArray()); - Descriptors.FieldDescriptor customValues = dynamicMessage.getDescriptorForType().findFieldByName("custom_fields"); - structProtoField = new StructProtoField(customValues, dynamicMessage.getField(customValues)); - } - - @Test - public void shouldSerialiseStructIntoJson() { - String value = (String) structProtoField.getValue(); - String jsonStr = "{\"null_value\":null," - + "\"number_value\":2.0," - + "\"string_value\":\"\"," - + "\"bool_value\":false," - + "\"struct_value\":{\"child_value1\":1.0}," - + "\"list_value\":[1.0,2.0]}"; - - assertEquals(jsonStr, value); - } - - @Test - public void shouldSerialiseRepeatedStructsIntoJson() throws InvalidProtocolBufferException { - Struct simpleStruct = Struct.newBuilder() - .putFields("null_value", Value.newBuilder().setNullValue(NullValue.NULL_VALUE) - .build()) - .putFields("number_value", Value.newBuilder().setNumberValue(2.0).build()) - .build(); - - TestStructMessage message = TestStructMessage.newBuilder() - .setOrderNumber("123X") - .addListCustomFields(simpleStruct) - .addListCustomFields(simpleStruct) - .build(); - - DynamicMessage dynamicMessage = DynamicMessage.parseFrom(message.getDescriptorForType(), message.toByteArray()); - Descriptors.FieldDescriptor listCustomFieldsDescriptor = dynamicMessage.getDescriptorForType().findFieldByName("list_custom_fields"); - structProtoField = new StructProtoField(listCustomFieldsDescriptor, dynamicMessage.getField(listCustomFieldsDescriptor)); - - Object value = structProtoField.getValue(); - - List jsonStrList = new ArrayList<>(); - jsonStrList.add("{\"null_value\":null,\"number_value\":2.0}"); - jsonStrList.add("{\"null_value\":null,\"number_value\":2.0}"); - - assertEquals(jsonStrList, value); - } - - @Test - public void shouldMatchStruct() { - boolean isMatch = structProtoField.matches(); - assertTrue(isMatch); - } -} diff --git 
a/src/test/java/io/odpf/firehose/sink/bigquery/converter/fields/TimestampProtoFieldTest.java b/src/test/java/io/odpf/firehose/sink/bigquery/converter/fields/TimestampProtoFieldTest.java deleted file mode 100644 index 9300ada86..000000000 --- a/src/test/java/io/odpf/firehose/sink/bigquery/converter/fields/TimestampProtoFieldTest.java +++ /dev/null @@ -1,46 +0,0 @@ -package io.odpf.firehose.sink.bigquery.converter.fields; - -import com.google.api.client.util.DateTime; -import com.google.protobuf.Descriptors; -import com.google.protobuf.DynamicMessage; -import com.google.protobuf.InvalidProtocolBufferException; -import com.google.protobuf.Timestamp; -import io.odpf.firehose.consumer.TestDurationMessage; -import org.junit.Before; -import org.junit.Test; - -import java.time.Instant; - -import static org.junit.Assert.*; - -public class TimestampProtoFieldTest { - private TimestampProtoField timestampProtoField; - private Instant time; - - @Before - public void setUp() throws Exception { - time = Instant.ofEpochSecond(200, 200); - TestDurationMessage message = TestDurationMessage.newBuilder() - .setEventTimestamp(Timestamp.newBuilder() - .setSeconds(time.getEpochSecond()) - .setNanos(time.getNano()) - .build()) - .build(); - DynamicMessage dynamicMessage = DynamicMessage.parseFrom(message.getDescriptorForType(), message.toByteArray()); - Descriptors.FieldDescriptor fieldDescriptor = dynamicMessage.getDescriptorForType().findFieldByName("event_timestamp"); - timestampProtoField = new TimestampProtoField(fieldDescriptor, dynamicMessage.getField(fieldDescriptor)); - } - - @Test - public void shouldParseGoogleProtobufTimestampProtoMessageToDateTime() throws InvalidProtocolBufferException { - DateTime dateTimeResult = (DateTime) timestampProtoField.getValue(); - - DateTime expected = new DateTime(time.toEpochMilli()); - assertEquals(expected, dateTimeResult); - } - - @Test - public void shouldMatchGoogleProtobufTimestamp() { - assertTrue(timestampProtoField.matches()); - } -} diff --git a/src/test/java/io/odpf/firehose/sink/bigquery/handler/BQClientTest.java b/src/test/java/io/odpf/firehose/sink/bigquery/handler/BQClientTest.java deleted file mode 100644 index e3c7b3910..000000000 --- a/src/test/java/io/odpf/firehose/sink/bigquery/handler/BQClientTest.java +++ /dev/null @@ -1,408 +0,0 @@ -package io.odpf.firehose.sink.bigquery.handler; - -import com.google.cloud.bigquery.BigQuery; -import com.google.cloud.bigquery.BigQueryException; -import com.google.cloud.bigquery.Dataset; -import com.google.cloud.bigquery.DatasetInfo; -import com.google.cloud.bigquery.Field; -import com.google.cloud.bigquery.LegacySQLTypeName; -import com.google.cloud.bigquery.Schema; -import com.google.cloud.bigquery.StandardTableDefinition; -import com.google.cloud.bigquery.Table; -import com.google.cloud.bigquery.TableDefinition; -import com.google.cloud.bigquery.TableId; -import com.google.cloud.bigquery.TableInfo; -import com.google.cloud.bigquery.TimePartitioning; -import io.odpf.firehose.config.BigQuerySinkConfig; -import io.odpf.firehose.metrics.Instrumentation; -import io.odpf.firehose.sink.bigquery.models.Constants; -import org.junit.Test; -import org.junit.runner.RunWith; -import org.mockito.Mock; -import org.mockito.runners.MockitoJUnitRunner; - -import java.util.ArrayList; - -import static org.mockito.Mockito.never; -import static org.mockito.Mockito.times; -import static org.mockito.Mockito.verify; -import static org.mockito.Mockito.when; - -@RunWith(MockitoJUnitRunner.class) -public class BQClientTest { - @Mock - 
private BigQuery bigquery; - @Mock - private BigQuerySinkConfig bqConfig; - @Mock - private Dataset dataset; - @Mock - private Table table; - @Mock - private StandardTableDefinition mockTableDefinition; - @Mock - private TimePartitioning mockTimePartitioning; - private BigQueryClient bqClient; - - @Mock - private Instrumentation instrumentation; - - @Test - public void shouldIgnoreExceptionIfDatasetAlreadyExists() { - when(bqConfig.isTablePartitioningEnabled()).thenReturn(true); - when(bqConfig.getTablePartitionKey()).thenReturn("partition_column"); - when(bqConfig.getBigQueryTablePartitionExpiryMS()).thenReturn(-1L); - when(bqConfig.getTableName()).thenReturn("bq-table"); - when(bqConfig.getDatasetName()).thenReturn("bq-proto"); - when(bqConfig.getBigQueryDatasetLocation()).thenReturn("US"); - bqClient = new BigQueryClient(bigquery, bqConfig, instrumentation); - - ArrayList bqSchemaFields = new ArrayList() {{ - add(Field.newBuilder("test-1", LegacySQLTypeName.INTEGER).setMode(Field.Mode.NULLABLE).build()); - add(Field.newBuilder("partition_column", LegacySQLTypeName.TIMESTAMP).setMode(Field.Mode.NULLABLE).build()); - add(Field.newBuilder(Constants.OFFSET_COLUMN_NAME, LegacySQLTypeName.INTEGER).setMode(Field.Mode.NULLABLE).build()); - add(Field.newBuilder(Constants.TOPIC_COLUMN_NAME, LegacySQLTypeName.STRING).setMode(Field.Mode.NULLABLE).build()); - add(Field.newBuilder(Constants.LOAD_TIME_COLUMN_NAME, LegacySQLTypeName.TIMESTAMP).setMode(Field.Mode.NULLABLE).build()); - add(Field.newBuilder(Constants.TIMESTAMP_COLUMN_NAME, LegacySQLTypeName.TIMESTAMP).setMode(Field.Mode.NULLABLE).build()); - add(Field.newBuilder(Constants.PARTITION_COLUMN_NAME, LegacySQLTypeName.INTEGER).setMode(Field.Mode.NULLABLE).build()); - }}; - - TableDefinition tableDefinition = getPartitionedTableDefinition(bqSchemaFields); - TableId tableId = TableId.of(bqConfig.getDatasetName(), bqConfig.getTableName()); - TableInfo tableInfo = TableInfo.newBuilder(tableId, tableDefinition).build(); - - when(bigquery.getDataset(tableId.getDataset())).thenReturn(dataset); - when(dataset.exists()).thenReturn(false); - when(table.exists()).thenReturn(false); - when(bigquery.getTable(tableId)).thenReturn(table); - when(bigquery.create(tableInfo)).thenReturn(table); - - bqClient.upsertTable(bqSchemaFields); - verify(bigquery).create(DatasetInfo.newBuilder(tableId.getDataset()).setLocation("US").build()); - verify(bigquery).create(tableInfo); - verify(bigquery, never()).update(tableInfo); - } - - @Test - public void shouldUpsertWithRetries() { - when(bqConfig.isTablePartitioningEnabled()).thenReturn(false); - when(bqConfig.getTableName()).thenReturn("bq-table"); - when(bqConfig.getDatasetName()).thenReturn("bq-proto"); - when(bqConfig.getBigQueryDatasetLocation()).thenReturn("US"); - bqClient = new BigQueryClient(bigquery, bqConfig, instrumentation); - - ArrayList bqSchemaFields = new ArrayList() {{ - add(Field.newBuilder("test-1", LegacySQLTypeName.INTEGER).setMode(Field.Mode.NULLABLE).build()); - add(Field.newBuilder("test-2", LegacySQLTypeName.STRING).setMode(Field.Mode.NULLABLE).build()); - add(Field.newBuilder(Constants.OFFSET_COLUMN_NAME, LegacySQLTypeName.INTEGER).setMode(Field.Mode.NULLABLE).build()); - add(Field.newBuilder(Constants.TOPIC_COLUMN_NAME, LegacySQLTypeName.STRING).setMode(Field.Mode.NULLABLE).build()); - add(Field.newBuilder(Constants.LOAD_TIME_COLUMN_NAME, LegacySQLTypeName.TIMESTAMP).setMode(Field.Mode.NULLABLE).build()); - add(Field.newBuilder(Constants.TIMESTAMP_COLUMN_NAME, 
LegacySQLTypeName.TIMESTAMP).setMode(Field.Mode.NULLABLE).build()); - add(Field.newBuilder(Constants.PARTITION_COLUMN_NAME, LegacySQLTypeName.INTEGER).setMode(Field.Mode.NULLABLE).build()); - }}; - - TableDefinition tableDefinition = getNonPartitionedTableDefinition(bqSchemaFields); - ArrayList updatedBQSchemaFields = new ArrayList<>(bqSchemaFields); - updatedBQSchemaFields.add(Field.newBuilder("new-field", LegacySQLTypeName.INTEGER).setMode(Field.Mode.NULLABLE).build()); - TableDefinition updatedBQTableDefinition = getNonPartitionedTableDefinition(updatedBQSchemaFields); - - TableId tableId = TableId.of(bqConfig.getDatasetName(), bqConfig.getTableName()); - TableInfo tableInfo = TableInfo.newBuilder(tableId, updatedBQTableDefinition).build(); - when(bigquery.getDataset(tableId.getDataset())).thenReturn(dataset); - when(dataset.exists()).thenReturn(true); - when(dataset.getLocation()).thenReturn("US"); - when(table.exists()).thenReturn(true); - when(bigquery.getTable(tableId)).thenReturn(table); - when(table.getDefinition()).thenReturn(mockTableDefinition); - when(mockTableDefinition.getSchema()).thenReturn(tableDefinition.getSchema()); - when(bigquery.update(tableInfo)) - .thenThrow(new BigQueryException(500, " Error while updating bigquery table on callback:Exceeded rate limits: too many table update operations")) - .thenThrow(new BigQueryException(500, " Error while updating bigquery table on callback:Exceeded rate limits: too many table update operations")) - .thenThrow(new BigQueryException(500, " Error while updating bigquery table on callback:Exceeded rate limits: too many table update operations")) - .thenReturn(table); - bqClient.upsertTable(updatedBQSchemaFields); - verify(bigquery, never()).create(tableInfo); - verify(bigquery, times(4)).update(tableInfo); - } - - @Test - public void shouldCreateBigqueryTableWithPartition() { - when(bqConfig.isTablePartitioningEnabled()).thenReturn(true); - when(bqConfig.getTablePartitionKey()).thenReturn("partition_column"); - when(bqConfig.getBigQueryTablePartitionExpiryMS()).thenReturn(-1L); - when(bqConfig.getTableName()).thenReturn("bq-table"); - when(bqConfig.getDatasetName()).thenReturn("bq-proto"); - when(bqConfig.getBigQueryDatasetLocation()).thenReturn("US"); - bqClient = new BigQueryClient(bigquery, bqConfig, instrumentation); - - ArrayList bqSchemaFields = new ArrayList() {{ - add(Field.newBuilder("test-1", LegacySQLTypeName.INTEGER).setMode(Field.Mode.NULLABLE).build()); - add(Field.newBuilder("partition_column", LegacySQLTypeName.TIMESTAMP).setMode(Field.Mode.NULLABLE).build()); - add(Field.newBuilder(Constants.OFFSET_COLUMN_NAME, LegacySQLTypeName.INTEGER).setMode(Field.Mode.NULLABLE).build()); - add(Field.newBuilder(Constants.TOPIC_COLUMN_NAME, LegacySQLTypeName.STRING).setMode(Field.Mode.NULLABLE).build()); - add(Field.newBuilder(Constants.LOAD_TIME_COLUMN_NAME, LegacySQLTypeName.TIMESTAMP).setMode(Field.Mode.NULLABLE).build()); - add(Field.newBuilder(Constants.TIMESTAMP_COLUMN_NAME, LegacySQLTypeName.TIMESTAMP).setMode(Field.Mode.NULLABLE).build()); - add(Field.newBuilder(Constants.PARTITION_COLUMN_NAME, LegacySQLTypeName.INTEGER).setMode(Field.Mode.NULLABLE).build()); - }}; - TableDefinition tableDefinition = getPartitionedTableDefinition(bqSchemaFields); - TableId tableId = TableId.of(bqConfig.getDatasetName(), bqConfig.getTableName()); - TableInfo tableInfo = TableInfo.newBuilder(tableId, tableDefinition).build(); - when(bigquery.getDataset(tableId.getDataset())).thenReturn(dataset); - 
when(dataset.exists()).thenReturn(true); - when(dataset.getLocation()).thenReturn("US"); - when(table.exists()).thenReturn(false); - when(bigquery.getTable(tableId)).thenReturn(table); - when(bigquery.create(tableInfo)).thenReturn(table); - - bqClient.upsertTable(bqSchemaFields); - verify(bigquery).create(tableInfo); - verify(bigquery, never()).update(tableInfo); - } - - @Test - public void shouldCreateBigqueryTableWithoutPartition() { - when(bqConfig.isTablePartitioningEnabled()).thenReturn(false); - when(bqConfig.getTableName()).thenReturn("bq-table"); - when(bqConfig.getDatasetName()).thenReturn("bq-proto"); - when(bqConfig.getBigQueryDatasetLocation()).thenReturn("US"); - bqClient = new BigQueryClient(bigquery, bqConfig, instrumentation); - - ArrayList bqSchemaFields = new ArrayList() {{ - add(Field.newBuilder("test-1", LegacySQLTypeName.INTEGER).setMode(Field.Mode.NULLABLE).build()); - add(Field.newBuilder("test-2", LegacySQLTypeName.STRING).setMode(Field.Mode.NULLABLE).build()); - add(Field.newBuilder(Constants.OFFSET_COLUMN_NAME, LegacySQLTypeName.INTEGER).setMode(Field.Mode.NULLABLE).build()); - add(Field.newBuilder(Constants.TOPIC_COLUMN_NAME, LegacySQLTypeName.STRING).setMode(Field.Mode.NULLABLE).build()); - add(Field.newBuilder(Constants.LOAD_TIME_COLUMN_NAME, LegacySQLTypeName.TIMESTAMP).setMode(Field.Mode.NULLABLE).build()); - add(Field.newBuilder(Constants.TIMESTAMP_COLUMN_NAME, LegacySQLTypeName.TIMESTAMP).setMode(Field.Mode.NULLABLE).build()); - add(Field.newBuilder(Constants.PARTITION_COLUMN_NAME, LegacySQLTypeName.INTEGER).setMode(Field.Mode.NULLABLE).build()); - }}; - - TableDefinition tableDefinition = getNonPartitionedTableDefinition(bqSchemaFields); - TableId tableId = TableId.of(bqConfig.getDatasetName(), bqConfig.getTableName()); - TableInfo tableInfo = TableInfo.newBuilder(tableId, tableDefinition).build(); - when(bigquery.getDataset(tableId.getDataset())).thenReturn(dataset); - when(dataset.exists()).thenReturn(true); - when(dataset.getLocation()).thenReturn("US"); - when(table.exists()).thenReturn(false); - when(bigquery.getTable(tableId)).thenReturn(table); - when(table.exists()).thenReturn(false); - when(bigquery.create(tableInfo)).thenReturn(table); - - bqClient.upsertTable(bqSchemaFields); - - verify(bigquery).create(tableInfo); - verify(bigquery, never()).update(tableInfo); - } - - @Test - public void shouldNotUpdateTableIfTableAlreadyExistsWithSameSchema() { - when(bqConfig.isTablePartitioningEnabled()).thenReturn(false); - when(bqConfig.getTableName()).thenReturn("bq-table"); - when(bqConfig.getDatasetName()).thenReturn("bq-proto"); - when(bqConfig.getBigQueryDatasetLocation()).thenReturn("US"); - bqClient = new BigQueryClient(bigquery, bqConfig, instrumentation); - - ArrayList bqSchemaFields = new ArrayList() {{ - add(Field.newBuilder("test-1", LegacySQLTypeName.INTEGER).setMode(Field.Mode.NULLABLE).build()); - add(Field.newBuilder("test-2", LegacySQLTypeName.STRING).setMode(Field.Mode.NULLABLE).build()); - add(Field.newBuilder(Constants.OFFSET_COLUMN_NAME, LegacySQLTypeName.INTEGER).setMode(Field.Mode.NULLABLE).build()); - add(Field.newBuilder(Constants.TOPIC_COLUMN_NAME, LegacySQLTypeName.STRING).setMode(Field.Mode.NULLABLE).build()); - add(Field.newBuilder(Constants.LOAD_TIME_COLUMN_NAME, LegacySQLTypeName.TIMESTAMP).setMode(Field.Mode.NULLABLE).build()); - add(Field.newBuilder(Constants.TIMESTAMP_COLUMN_NAME, LegacySQLTypeName.TIMESTAMP).setMode(Field.Mode.NULLABLE).build()); - add(Field.newBuilder(Constants.PARTITION_COLUMN_NAME, 
LegacySQLTypeName.INTEGER).setMode(Field.Mode.NULLABLE).build()); - }}; - - TableDefinition tableDefinition = getNonPartitionedTableDefinition(bqSchemaFields); - - TableId tableId = TableId.of(bqConfig.getDatasetName(), bqConfig.getTableName()); - TableInfo tableInfo = TableInfo.newBuilder(tableId, tableDefinition).build(); - when(bigquery.getDataset(tableId.getDataset())).thenReturn(dataset); - when(dataset.exists()).thenReturn(true); - when(dataset.getLocation()).thenReturn("US"); - when(table.exists()).thenReturn(true); - when(bigquery.getTable(tableId)).thenReturn(table); - when(table.getDefinition()).thenReturn(mockTableDefinition); - when(mockTableDefinition.getType()).thenReturn(TableDefinition.Type.TABLE); - when(mockTableDefinition.getSchema()).thenReturn(tableDefinition.getSchema()); - when(table.exists()).thenReturn(true); - - bqClient.upsertTable(bqSchemaFields); - verify(bigquery, never()).create(tableInfo); - verify(bigquery, never()).update(tableInfo); - } - - @Test - public void shouldUpdateTableIfTableAlreadyExistsAndSchemaChanges() { - when(bqConfig.isTablePartitioningEnabled()).thenReturn(false); - when(bqConfig.getTableName()).thenReturn("bq-table"); - when(bqConfig.getDatasetName()).thenReturn("bq-proto"); - when(bqConfig.getBigQueryDatasetLocation()).thenReturn("US"); - bqClient = new BigQueryClient(bigquery, bqConfig, instrumentation); - - ArrayList bqSchemaFields = new ArrayList() {{ - add(Field.newBuilder("test-1", LegacySQLTypeName.INTEGER).setMode(Field.Mode.NULLABLE).build()); - add(Field.newBuilder("test-2", LegacySQLTypeName.STRING).setMode(Field.Mode.NULLABLE).build()); - add(Field.newBuilder(Constants.OFFSET_COLUMN_NAME, LegacySQLTypeName.INTEGER).setMode(Field.Mode.NULLABLE).build()); - add(Field.newBuilder(Constants.TOPIC_COLUMN_NAME, LegacySQLTypeName.STRING).setMode(Field.Mode.NULLABLE).build()); - add(Field.newBuilder(Constants.LOAD_TIME_COLUMN_NAME, LegacySQLTypeName.TIMESTAMP).setMode(Field.Mode.NULLABLE).build()); - add(Field.newBuilder(Constants.TIMESTAMP_COLUMN_NAME, LegacySQLTypeName.TIMESTAMP).setMode(Field.Mode.NULLABLE).build()); - add(Field.newBuilder(Constants.PARTITION_COLUMN_NAME, LegacySQLTypeName.INTEGER).setMode(Field.Mode.NULLABLE).build()); - }}; - - TableDefinition tableDefinition = getNonPartitionedTableDefinition(bqSchemaFields); - ArrayList updatedBQSchemaFields = new ArrayList<>(bqSchemaFields); - updatedBQSchemaFields.add(Field.newBuilder("new-field", LegacySQLTypeName.INTEGER).setMode(Field.Mode.NULLABLE).build()); - TableDefinition updatedBQTableDefinition = getNonPartitionedTableDefinition(updatedBQSchemaFields); - - TableId tableId = TableId.of(bqConfig.getDatasetName(), bqConfig.getTableName()); - TableInfo tableInfo = TableInfo.newBuilder(tableId, updatedBQTableDefinition).build(); - when(bigquery.getDataset(tableId.getDataset())).thenReturn(dataset); - when(dataset.exists()).thenReturn(true); - when(dataset.getLocation()).thenReturn("US"); - when(table.exists()).thenReturn(true); - when(bigquery.getTable(tableId)).thenReturn(table); - when(table.getDefinition()).thenReturn(mockTableDefinition); - when(mockTableDefinition.getSchema()).thenReturn(tableDefinition.getSchema()); - when(bigquery.update(tableInfo)).thenReturn(table); - - bqClient.upsertTable(updatedBQSchemaFields); - verify(bigquery, never()).create(tableInfo); - verify(bigquery).update(tableInfo); - } - - @Test - public void shouldUpdateTableIfTableNeedsToSetPartitionExpiry() { - long partitionExpiry = 5184000000L; - 
when(bqConfig.isTablePartitioningEnabled()).thenReturn(true); - when(bqConfig.getTableName()).thenReturn("bq-table"); - when(bqConfig.getDatasetName()).thenReturn("bq-proto"); - when(bqConfig.getBigQueryTablePartitionExpiryMS()).thenReturn(partitionExpiry); - when(bqConfig.getTablePartitionKey()).thenReturn("partition_column"); - when(bqConfig.getBigQueryDatasetLocation()).thenReturn("US"); - bqClient = new BigQueryClient(bigquery, bqConfig, instrumentation); - - ArrayList bqSchemaFields = new ArrayList() {{ - add(Field.newBuilder("test-1", LegacySQLTypeName.INTEGER).setMode(Field.Mode.NULLABLE).build()); - add(Field.newBuilder("partition_column", LegacySQLTypeName.TIMESTAMP).setMode(Field.Mode.NULLABLE).build()); - add(Field.newBuilder(Constants.OFFSET_COLUMN_NAME, LegacySQLTypeName.INTEGER).setMode(Field.Mode.NULLABLE).build()); - add(Field.newBuilder(Constants.TOPIC_COLUMN_NAME, LegacySQLTypeName.STRING).setMode(Field.Mode.NULLABLE).build()); - add(Field.newBuilder(Constants.LOAD_TIME_COLUMN_NAME, LegacySQLTypeName.TIMESTAMP).setMode(Field.Mode.NULLABLE).build()); - add(Field.newBuilder(Constants.TIMESTAMP_COLUMN_NAME, LegacySQLTypeName.TIMESTAMP).setMode(Field.Mode.NULLABLE).build()); - add(Field.newBuilder(Constants.PARTITION_COLUMN_NAME, LegacySQLTypeName.INTEGER).setMode(Field.Mode.NULLABLE).build()); - }}; - - TableDefinition tableDefinition = getPartitionedTableDefinition(bqSchemaFields); - - TableId tableId = TableId.of(bqConfig.getDatasetName(), bqConfig.getTableName()); - TableInfo tableInfo = TableInfo.newBuilder(tableId, tableDefinition).build(); - when(bigquery.getDataset(tableId.getDataset())).thenReturn(dataset); - when(dataset.exists()).thenReturn(true); - when(dataset.getLocation()).thenReturn("US"); - when(table.exists()).thenReturn(true); - when(bigquery.getTable(tableId)).thenReturn(table); - when(table.getDefinition()).thenReturn(mockTableDefinition); - when(mockTableDefinition.getType()).thenReturn(TableDefinition.Type.TABLE); - when(mockTableDefinition.getTimePartitioning()).thenReturn(mockTimePartitioning); - when(mockTimePartitioning.getExpirationMs()).thenReturn(null); - when(mockTableDefinition.getSchema()).thenReturn(tableDefinition.getSchema()); - when(table.exists()).thenReturn(true); - - bqClient.upsertTable(bqSchemaFields); - verify(bigquery, never()).create(tableInfo); - verify(bigquery).update(tableInfo); - } - - @Test(expected = BigQueryException.class) - public void shouldThrowExceptionIfUpdateTableFails() { - when(bqConfig.isTablePartitioningEnabled()).thenReturn(false); - when(bqConfig.getTableName()).thenReturn("bq-table"); - when(bqConfig.getDatasetName()).thenReturn("bq-proto"); - when(bqConfig.getBigQueryDatasetLocation()).thenReturn("US"); - - ArrayList bqSchemaFields = new ArrayList() {{ - add(Field.newBuilder("test-1", LegacySQLTypeName.INTEGER).setMode(Field.Mode.NULLABLE).build()); - add(Field.newBuilder("test-2", LegacySQLTypeName.STRING).setMode(Field.Mode.NULLABLE).build()); - add(Field.newBuilder(Constants.OFFSET_COLUMN_NAME, LegacySQLTypeName.INTEGER).setMode(Field.Mode.NULLABLE).build()); - add(Field.newBuilder(Constants.TOPIC_COLUMN_NAME, LegacySQLTypeName.STRING).setMode(Field.Mode.NULLABLE).build()); - add(Field.newBuilder(Constants.LOAD_TIME_COLUMN_NAME, LegacySQLTypeName.TIMESTAMP).setMode(Field.Mode.NULLABLE).build()); - add(Field.newBuilder(Constants.TIMESTAMP_COLUMN_NAME, LegacySQLTypeName.TIMESTAMP).setMode(Field.Mode.NULLABLE).build()); - add(Field.newBuilder(Constants.PARTITION_COLUMN_NAME, 
LegacySQLTypeName.INTEGER).setMode(Field.Mode.NULLABLE).build()); - }}; - - TableDefinition tableDefinition = getNonPartitionedTableDefinition(bqSchemaFields); - ArrayList updatedBQSchemaFields = new ArrayList<>(bqSchemaFields); - updatedBQSchemaFields.add(Field.newBuilder("new-field", LegacySQLTypeName.INTEGER).setMode(Field.Mode.NULLABLE).build()); - TableDefinition updatedBQTableDefinition = getNonPartitionedTableDefinition(updatedBQSchemaFields); - - TableId tableId = TableId.of(bqConfig.getDatasetName(), bqConfig.getTableName()); - TableInfo tableInfo = TableInfo.newBuilder(tableId, updatedBQTableDefinition).build(); - when(bigquery.getDataset(tableId.getDataset())).thenReturn(dataset); - when(dataset.exists()).thenReturn(true); - when(dataset.getLocation()).thenReturn("US"); - when(table.exists()).thenReturn(true); - when(bigquery.getTable(tableId)).thenReturn(table); - when(table.getDefinition()).thenReturn(mockTableDefinition); - when(mockTableDefinition.getSchema()).thenReturn(tableDefinition.getSchema()); - when(bigquery.update(tableInfo)).thenThrow(new BigQueryException(404, "Failed to update")); - - bqClient = new BigQueryClient(bigquery, bqConfig, instrumentation); - bqClient.upsertTable(updatedBQSchemaFields); - } - - @Test(expected = RuntimeException.class) - public void shouldThrowExceptionIfDatasetLocationIsChanged() { - when(bqConfig.isTablePartitioningEnabled()).thenReturn(false); - when(bqConfig.getBigQueryTablePartitionExpiryMS()).thenReturn(-1L); - when(bqConfig.getTableName()).thenReturn("bq-table"); - when(bqConfig.getDatasetName()).thenReturn("bq-proto"); - when(bqConfig.getBigQueryDatasetLocation()).thenReturn("new-location"); - bqClient = new BigQueryClient(bigquery, bqConfig, instrumentation); - - ArrayList bqSchemaFields = new ArrayList() {{ - add(Field.newBuilder("test-1", LegacySQLTypeName.INTEGER).setMode(Field.Mode.NULLABLE).build()); - add(Field.newBuilder("test-2", LegacySQLTypeName.STRING).setMode(Field.Mode.NULLABLE).build()); - add(Field.newBuilder(Constants.OFFSET_COLUMN_NAME, LegacySQLTypeName.INTEGER).setMode(Field.Mode.NULLABLE).build()); - add(Field.newBuilder(Constants.TOPIC_COLUMN_NAME, LegacySQLTypeName.STRING).setMode(Field.Mode.NULLABLE).build()); - add(Field.newBuilder(Constants.LOAD_TIME_COLUMN_NAME, LegacySQLTypeName.TIMESTAMP).setMode(Field.Mode.NULLABLE).build()); - add(Field.newBuilder(Constants.TIMESTAMP_COLUMN_NAME, LegacySQLTypeName.TIMESTAMP).setMode(Field.Mode.NULLABLE).build()); - add(Field.newBuilder(Constants.PARTITION_COLUMN_NAME, LegacySQLTypeName.INTEGER).setMode(Field.Mode.NULLABLE).build()); - }}; - - TableDefinition tableDefinition = getPartitionedTableDefinition(bqSchemaFields); - TableId tableId = TableId.of(bqConfig.getDatasetName(), bqConfig.getTableName()); - TableInfo tableInfo = TableInfo.newBuilder(tableId, tableDefinition).build(); - - when(bigquery.getDataset(tableId.getDataset())).thenReturn(dataset); - when(dataset.exists()).thenReturn(true); - when(dataset.getLocation()).thenReturn("US"); - - bqClient.upsertTable(bqSchemaFields); - verify(bigquery, never()).create(tableInfo); - verify(bigquery, never()).update(tableInfo); - } - - private TableDefinition getPartitionedTableDefinition(ArrayList bqSchemaFields) { - TimePartitioning.Builder timePartitioningBuilder = TimePartitioning.newBuilder(TimePartitioning.Type.DAY); - timePartitioningBuilder.setField(bqConfig.getTablePartitionKey()) - .setRequirePartitionFilter(true); - - if (bqConfig.getBigQueryTablePartitionExpiryMS() > 0) { - 
timePartitioningBuilder.setExpirationMs(bqConfig.getBigQueryTablePartitionExpiryMS()); - } - - Schema schema = Schema.of(bqSchemaFields); - - return StandardTableDefinition.newBuilder() - .setSchema(schema) - .setTimePartitioning(timePartitioningBuilder.build()) - .build(); - } - - private TableDefinition getNonPartitionedTableDefinition(ArrayList bqSchemaFields) { - Schema schema = Schema.of(bqSchemaFields); - - return StandardTableDefinition.newBuilder() - .setSchema(schema) - .build(); - } -} diff --git a/src/test/java/io/odpf/firehose/sink/bigquery/handler/BQTableDefinitionTest.java b/src/test/java/io/odpf/firehose/sink/bigquery/handler/BQTableDefinitionTest.java deleted file mode 100644 index 843f5e96b..000000000 --- a/src/test/java/io/odpf/firehose/sink/bigquery/handler/BQTableDefinitionTest.java +++ /dev/null @@ -1,132 +0,0 @@ -package io.odpf.firehose.sink.bigquery.handler; - - -import com.google.cloud.bigquery.Field; -import com.google.cloud.bigquery.LegacySQLTypeName; -import com.google.cloud.bigquery.Schema; -import com.google.cloud.bigquery.StandardTableDefinition; -import io.odpf.firehose.config.BigQuerySinkConfig; -import org.junit.Test; -import org.junit.runner.RunWith; -import org.mockito.Mock; -import org.mockito.runners.MockitoJUnitRunner; - -import static org.junit.Assert.assertEquals; -import static org.mockito.Mockito.when; - -@RunWith(MockitoJUnitRunner.class) -public class BQTableDefinitionTest { - @Mock - private BigQuerySinkConfig bqConfig; - - @Test(expected = UnsupportedOperationException.class) - public void shouldThrowUnsupportedExceptionForRangePartition() { - when(bqConfig.isTablePartitioningEnabled()).thenReturn(true); - when(bqConfig.getTablePartitionKey()).thenReturn("int_field"); - - Schema bqSchema = Schema.of( - Field.newBuilder("int_field", LegacySQLTypeName.INTEGER).build() - ); - - BQTableDefinition bqTableDefinition = new BQTableDefinition(bqConfig); - bqTableDefinition.getTableDefinition(bqSchema); - } - - @Test - public void shouldReturnTableDefinitionIfPartitionDisabled() { - when(bqConfig.isTablePartitioningEnabled()).thenReturn(false); - Schema bqSchema = Schema.of( - Field.newBuilder("int_field", LegacySQLTypeName.INTEGER).build() - ); - - BQTableDefinition bqTableDefinition = new BQTableDefinition(bqConfig); - StandardTableDefinition tableDefinition = bqTableDefinition.getTableDefinition(bqSchema); - Schema returnedSchema = tableDefinition.getSchema(); - assertEquals(returnedSchema.getFields().size(), bqSchema.getFields().size()); - assertEquals(returnedSchema.getFields().get(0).getName(), bqSchema.getFields().get(0).getName()); - assertEquals(returnedSchema.getFields().get(0).getMode(), bqSchema.getFields().get(0).getMode()); - assertEquals(returnedSchema.getFields().get(0).getType(), bqSchema.getFields().get(0).getType()); - } - - @Test(expected = RuntimeException.class) - public void shouldThrowErrorIfPartitionFieldNotSet() { - when(bqConfig.isTablePartitioningEnabled()).thenReturn(true); - Schema bqSchema = Schema.of( - Field.newBuilder("int_field", LegacySQLTypeName.INTEGER).build() - ); - - BQTableDefinition bqTableDefinition = new BQTableDefinition(bqConfig); - StandardTableDefinition tableDefinition = bqTableDefinition.getTableDefinition(bqSchema); - tableDefinition.getSchema(); - } - - @Test - public void shouldCreatePartitionedTable() { - when(bqConfig.isTablePartitioningEnabled()).thenReturn(true); - when(bqConfig.getTablePartitionKey()).thenReturn("timestamp_field"); - Schema bqSchema = Schema.of( - 
Field.newBuilder("timestamp_field", LegacySQLTypeName.TIMESTAMP).build() - ); - - BQTableDefinition bqTableDefinition = new BQTableDefinition(bqConfig); - StandardTableDefinition tableDefinition = bqTableDefinition.getTableDefinition(bqSchema); - - Schema returnedSchema = tableDefinition.getSchema(); - assertEquals(returnedSchema.getFields().size(), bqSchema.getFields().size()); - assertEquals(returnedSchema.getFields().get(0).getName(), bqSchema.getFields().get(0).getName()); - assertEquals(returnedSchema.getFields().get(0).getMode(), bqSchema.getFields().get(0).getMode()); - assertEquals(returnedSchema.getFields().get(0).getType(), bqSchema.getFields().get(0).getType()); - assertEquals("timestamp_field", tableDefinition.getTimePartitioning().getField()); - } - - @Test - public void shouldCreateTableWithPartitionExpiry() { - long partitionExpiry = 5184000000L; - when(bqConfig.getBigQueryTablePartitionExpiryMS()).thenReturn(partitionExpiry); - when(bqConfig.isTablePartitioningEnabled()).thenReturn(true); - when(bqConfig.getTablePartitionKey()).thenReturn("timestamp_field"); - Schema bqSchema = Schema.of( - Field.newBuilder("timestamp_field", LegacySQLTypeName.TIMESTAMP).build() - ); - - BQTableDefinition bqTableDefinition = new BQTableDefinition(bqConfig); - StandardTableDefinition tableDefinition = bqTableDefinition.getTableDefinition(bqSchema); - - assertEquals("timestamp_field", tableDefinition.getTimePartitioning().getField()); - assertEquals(partitionExpiry, tableDefinition.getTimePartitioning().getExpirationMs().longValue()); - } - - @Test - public void shouldReturnTableWithNullPartitionExpiryIfLessThanZero() { - long partitionExpiry = -1L; - when(bqConfig.getBigQueryTablePartitionExpiryMS()).thenReturn(partitionExpiry); - when(bqConfig.isTablePartitioningEnabled()).thenReturn(true); - when(bqConfig.getTablePartitionKey()).thenReturn("timestamp_field"); - Schema bqSchema = Schema.of( - Field.newBuilder("timestamp_field", LegacySQLTypeName.TIMESTAMP).build() - ); - - BQTableDefinition bqTableDefinition = new BQTableDefinition(bqConfig); - StandardTableDefinition tableDefinition = bqTableDefinition.getTableDefinition(bqSchema); - - assertEquals("timestamp_field", tableDefinition.getTimePartitioning().getField()); - assertEquals(null, tableDefinition.getTimePartitioning().getExpirationMs()); - } - - @Test - public void shouldReturnTableWithNullPartitionExpiryIfEqualsZero() { - long partitionExpiry = 0L; - when(bqConfig.getBigQueryTablePartitionExpiryMS()).thenReturn(partitionExpiry); - when(bqConfig.isTablePartitioningEnabled()).thenReturn(true); - when(bqConfig.getTablePartitionKey()).thenReturn("timestamp_field"); - Schema bqSchema = Schema.of( - Field.newBuilder("timestamp_field", LegacySQLTypeName.TIMESTAMP).build() - ); - - BQTableDefinition bqTableDefinition = new BQTableDefinition(bqConfig); - StandardTableDefinition tableDefinition = bqTableDefinition.getTableDefinition(bqSchema); - - assertEquals("timestamp_field", tableDefinition.getTimePartitioning().getField()); - assertEquals(null, tableDefinition.getTimePartitioning().getExpirationMs()); - } -} diff --git a/src/test/java/io/odpf/firehose/sink/bigquery/handler/BigQueryResponseParserTest.java b/src/test/java/io/odpf/firehose/sink/bigquery/handler/BigQueryResponseParserTest.java deleted file mode 100644 index 3da170bad..000000000 --- a/src/test/java/io/odpf/firehose/sink/bigquery/handler/BigQueryResponseParserTest.java +++ /dev/null @@ -1,104 +0,0 @@ -package io.odpf.firehose.sink.bigquery.handler; - -import 
com.google.cloud.bigquery.BigQueryError; -import com.google.cloud.bigquery.InsertAllResponse; -import com.google.protobuf.InvalidProtocolBufferException; -import io.odpf.firehose.TestMessageBQ; -import io.odpf.firehose.message.Message; -import io.odpf.firehose.error.ErrorInfo; -import io.odpf.firehose.error.ErrorType; -import io.odpf.firehose.metrics.BigQueryMetrics; -import io.odpf.firehose.metrics.Instrumentation; -import io.odpf.firehose.sink.bigquery.MessageUtils; -import io.odpf.firehose.sink.bigquery.OffsetInfo; -import io.odpf.firehose.sink.bigquery.exception.BigQuerySinkException; -import io.odpf.firehose.sink.bigquery.models.Record; -import org.aeonbits.owner.util.Collections; -import org.junit.Assert; -import org.junit.Before; -import org.junit.Test; -import org.mockito.Mock; -import org.mockito.Mockito; -import org.mockito.MockitoAnnotations; - -import java.time.Instant; -import java.util.HashMap; -import java.util.List; -import java.util.Map; - -public class BigQueryResponseParserTest { - - private final MessageUtils util = new MessageUtils(); - @Mock - private InsertAllResponse response; - - @Mock - private Instrumentation instrumentation; - - @Before - public void setup() { - MockitoAnnotations.initMocks(this); - } - - @Test - public void shouldParseResponse() throws InvalidProtocolBufferException { - OffsetInfo record1Offset = new OffsetInfo("topic1", 1, 101, Instant.now().toEpochMilli()); - OffsetInfo record2Offset = new OffsetInfo("topic1", 2, 102, Instant.now().toEpochMilli()); - OffsetInfo record3Offset = new OffsetInfo("topic1", 3, 103, Instant.now().toEpochMilli()); - OffsetInfo record4Offset = new OffsetInfo("topic1", 4, 104, Instant.now().toEpochMilli()); - OffsetInfo record5Offset = new OffsetInfo("topic1", 5, 104, Instant.now().toEpochMilli()); - OffsetInfo record6Offset = new OffsetInfo("topic1", 6, 104, Instant.now().toEpochMilli()); - Record record1 = new Record(util.withOffsetInfo(record1Offset).createConsumerRecord("order-1", "order-url-1", "order-details-1"), new HashMap<>()); - Record record2 = new Record(util.withOffsetInfo(record2Offset).createConsumerRecord("order-2", "order-url-2", "order-details-2"), new HashMap<>()); - Record record3 = new Record(util.withOffsetInfo(record3Offset).createConsumerRecord("order-3", "order-url-3", "order-details-3"), new HashMap<>()); - Record record4 = new Record(util.withOffsetInfo(record4Offset).createConsumerRecord("order-4", "order-url-4", "order-details-4"), new HashMap<>()); - Record record5 = new Record(util.withOffsetInfo(record5Offset).createConsumerRecord("order-5", "order-url-5", "order-details-5"), new HashMap<>()); - Record record6 = new Record(util.withOffsetInfo(record6Offset).createConsumerRecord("order-6", "order-url-6", "order-details-6"), new HashMap<>()); - List records = Collections.list(record1, record2, record3, record4, record5, record6); - BigQueryError error1 = new BigQueryError("", "US", ""); - BigQueryError error2 = new BigQueryError("invalid", "US", "no such field"); - BigQueryError error3 = new BigQueryError("invalid", "", "The destination table's partition tmp$20160101 is outside the allowed bounds. 
You can only stream to partitions within 1825 days in the past and 366 days in the future relative to the current date"); - BigQueryError error4 = new BigQueryError("stopped", "", ""); - - Map> insertErrorsMap = new HashMap>() {{ - put(0L, Collections.list(error1)); - put(1L, Collections.list(error2)); - put(2L, Collections.list(error3)); - put(3L, Collections.list(error4)); - }}; - Mockito.when(response.hasErrors()).thenReturn(true); - Mockito.when(response.getInsertErrors()).thenReturn(insertErrorsMap); - List messages = BigQueryResponseParser.parseResponse(records, response, instrumentation); - - Assert.assertEquals(4, messages.size()); - Assert.assertEquals(TestMessageBQ.newBuilder() - .setOrderNumber("order-1") - .setOrderUrl("order-url-1") - .setOrderDetails("order-details-1") - .build(), TestMessageBQ.parseFrom(messages.get(0).getLogMessage())); - Assert.assertEquals(TestMessageBQ.newBuilder() - .setOrderNumber("order-2") - .setOrderUrl("order-url-2") - .setOrderDetails("order-details-2") - .build(), TestMessageBQ.parseFrom(messages.get(1).getLogMessage())); - Assert.assertEquals(TestMessageBQ.newBuilder() - .setOrderNumber("order-3") - .setOrderUrl("order-url-3") - .setOrderDetails("order-details-3") - .build(), TestMessageBQ.parseFrom(messages.get(2).getLogMessage())); - Assert.assertEquals(TestMessageBQ.newBuilder() - .setOrderNumber("order-4") - .setOrderUrl("order-url-4") - .setOrderDetails("order-details-4") - .build(), TestMessageBQ.parseFrom(messages.get(3).getLogMessage())); - Assert.assertEquals(new ErrorInfo(new BigQuerySinkException(), ErrorType.SINK_UNKNOWN_ERROR), messages.get(0).getErrorInfo()); - Assert.assertEquals(new ErrorInfo(new BigQuerySinkException(), ErrorType.SINK_4XX_ERROR), messages.get(1).getErrorInfo()); - Assert.assertEquals(new ErrorInfo(new BigQuerySinkException(), ErrorType.SINK_4XX_ERROR), messages.get(2).getErrorInfo()); - Assert.assertEquals(new ErrorInfo(new BigQuerySinkException(), ErrorType.SINK_5XX_ERROR), messages.get(3).getErrorInfo()); - - Mockito.verify(instrumentation, Mockito.times(1)).incrementCounter(BigQueryMetrics.SINK_BIGQUERY_ERRORS_TOTAL, String.format(BigQueryMetrics.BIGQUERY_ERROR_TAG, BigQueryMetrics.BigQueryErrorType.UNKNOWN_ERROR)); - Mockito.verify(instrumentation, Mockito.times(1)).incrementCounter(BigQueryMetrics.SINK_BIGQUERY_ERRORS_TOTAL, String.format(BigQueryMetrics.BIGQUERY_ERROR_TAG, BigQueryMetrics.BigQueryErrorType.INVALID_SCHEMA_ERROR)); - Mockito.verify(instrumentation, Mockito.times(1)).incrementCounter(BigQueryMetrics.SINK_BIGQUERY_ERRORS_TOTAL, String.format(BigQueryMetrics.BIGQUERY_ERROR_TAG, BigQueryMetrics.BigQueryErrorType.OOB_ERROR)); - Mockito.verify(instrumentation, Mockito.times(1)).incrementCounter(BigQueryMetrics.SINK_BIGQUERY_ERRORS_TOTAL, String.format(BigQueryMetrics.BIGQUERY_ERROR_TAG, BigQueryMetrics.BigQueryErrorType.STOPPED_ERROR)); - } -} diff --git a/src/test/java/io/odpf/firehose/sink/bigquery/handler/BigQueryRowWithInsertIdTest.java b/src/test/java/io/odpf/firehose/sink/bigquery/handler/BigQueryRowWithInsertIdTest.java deleted file mode 100644 index 7050956a6..000000000 --- a/src/test/java/io/odpf/firehose/sink/bigquery/handler/BigQueryRowWithInsertIdTest.java +++ /dev/null @@ -1,27 +0,0 @@ -package io.odpf.firehose.sink.bigquery.handler; - -import com.google.cloud.bigquery.InsertAllRequest; -import io.odpf.firehose.message.Message; -import io.odpf.firehose.sink.bigquery.models.Record; -import org.junit.Test; - -import java.nio.charset.StandardCharsets; -import java.util.HashMap; - -import 
static org.junit.Assert.assertEquals; - - -public class BigQueryRowWithInsertIdTest { - - @Test - public void shouldCreateRowWithInsertID() { - Message message = new Message("key".getBytes(StandardCharsets.UTF_8), "value".getBytes(StandardCharsets.UTF_8), "default", 1, 1); - Record record = new Record(message, new HashMap<>()); - - BigQueryRowWithInsertId withInsertId = new BigQueryRowWithInsertId(); - InsertAllRequest.RowToInsert rowToInsert = withInsertId.of(record); - String id = rowToInsert.getId(); - - assertEquals("default_1_1", id); - } -} diff --git a/src/test/java/io/odpf/firehose/sink/bigquery/handler/BigQueryRowWithoutInsertIdTest.java b/src/test/java/io/odpf/firehose/sink/bigquery/handler/BigQueryRowWithoutInsertIdTest.java deleted file mode 100644 index 4069d9abe..000000000 --- a/src/test/java/io/odpf/firehose/sink/bigquery/handler/BigQueryRowWithoutInsertIdTest.java +++ /dev/null @@ -1,26 +0,0 @@ -package io.odpf.firehose.sink.bigquery.handler; - -import com.google.cloud.bigquery.InsertAllRequest; -import io.odpf.firehose.message.Message; -import io.odpf.firehose.sink.bigquery.models.Record; -import org.junit.Test; - -import java.nio.charset.StandardCharsets; -import java.util.HashMap; - -import static org.junit.Assert.*; - -public class BigQueryRowWithoutInsertIdTest { - - @Test - public void shouldCreateRowWithoutInsertID() { - Message message = new Message("key".getBytes(StandardCharsets.UTF_8), "value".getBytes(StandardCharsets.UTF_8), "default", 1, 1); - Record record = new Record(message, new HashMap<>()); - - BigQueryRowWithoutInsertId withoutInsertId = new BigQueryRowWithoutInsertId(); - InsertAllRequest.RowToInsert rowToInsert = withoutInsertId.of(record); - String id = rowToInsert.getId(); - - assertNull(id); - } -} diff --git a/src/test/java/io/odpf/firehose/sink/bigquery/models/BQFieldTest.java b/src/test/java/io/odpf/firehose/sink/bigquery/models/BQFieldTest.java deleted file mode 100644 index 553b1155b..000000000 --- a/src/test/java/io/odpf/firehose/sink/bigquery/models/BQFieldTest.java +++ /dev/null @@ -1,295 +0,0 @@ -package io.odpf.firehose.sink.bigquery.models; - -import com.google.cloud.bigquery.Field; -import com.google.cloud.bigquery.FieldList; -import com.google.cloud.bigquery.LegacySQLTypeName; -import com.google.protobuf.Descriptors; -import io.odpf.firehose.consumer.TestMessage; -import io.odpf.firehose.consumer.TestTypesMessage; -import org.junit.Test; - -import java.util.ArrayList; -import java.util.List; - -import static org.junit.Assert.*; - -public class BQFieldTest { - - private Descriptors.Descriptor testMessageDescriptor = TestTypesMessage.newBuilder().build().getDescriptorForType(); - - @Test - public void shouldReturnBigqueryField() { - String fieldName = "double_value"; - Field expected = Field.newBuilder(fieldName, LegacySQLTypeName.FLOAT).setMode(Field.Mode.NULLABLE).build(); - Field field = fieldDescriptorToField(testMessageDescriptor.findFieldByName(fieldName)); - - assertEquals(expected, field); - } - - @Test - public void shouldReturnBigqueryFieldWithChildField() { - String fieldName = "message_value"; - - TestMessage testMessage = TestMessage.newBuilder().build(); - Descriptors.FieldDescriptor orderNumber = testMessage.getDescriptorForType().findFieldByName("order_number"); - Descriptors.FieldDescriptor orderUrl = testMessage.getDescriptorForType().findFieldByName("order_url"); - Descriptors.FieldDescriptor orderDetails = testMessage.getDescriptorForType().findFieldByName("order_details"); - - List childFields = new ArrayList<>(); - 
childFields.add(fieldDescriptorToField(orderNumber)); - childFields.add(fieldDescriptorToField(orderUrl)); - childFields.add(fieldDescriptorToField(orderDetails)); - - Descriptors.FieldDescriptor messageFieldDescriptor = testMessageDescriptor.findFieldByName(fieldName); - BQField bqField = fieldDescriptorToBQField(messageFieldDescriptor); - bqField.setSubFields(childFields); - Field field = bqField.getField(); - - Field expectedOrderNumberBqField = Field.newBuilder("order_number", LegacySQLTypeName.STRING).setMode(Field.Mode.NULLABLE).build(); - Field expectedOrderNumberBqFieldUrl = Field.newBuilder("order_url", LegacySQLTypeName.STRING).setMode(Field.Mode.NULLABLE).build(); - Field expectedOrderDetailsBqField1 = Field.newBuilder("order_details", LegacySQLTypeName.STRING).setMode(Field.Mode.NULLABLE).build(); - - Field expected = Field.newBuilder(fieldName, LegacySQLTypeName.RECORD, - FieldList.of(expectedOrderNumberBqField, - expectedOrderNumberBqFieldUrl, - expectedOrderDetailsBqField1)).setMode(Field.Mode.NULLABLE).build(); - - assertEquals(expected, field); - } - - - @Test - public void shouldConvertProtobufTimestampToBigqueryTimestamp() { - String fieldName = "timestamp_value"; - Descriptors.FieldDescriptor fieldDescriptor = testMessageDescriptor.findFieldByName(fieldName); - - BQField bqField = fieldDescriptorToBQField(fieldDescriptor); - LegacySQLTypeName bqFieldType = bqField.getType(); - - assertEquals(LegacySQLTypeName.TIMESTAMP, bqFieldType); - } - - @Test - public void shouldConvertProtobufStructToBigqueryString() { - String fieldName = "struct_value"; - Descriptors.FieldDescriptor fieldDescriptor = testMessageDescriptor.findFieldByName(fieldName); - - BQField bqField = fieldDescriptorToBQField(fieldDescriptor); - LegacySQLTypeName bqFieldType = bqField.getType(); - - assertEquals(LegacySQLTypeName.STRING, bqFieldType); - } - - @Test - public void shouldConvertProtobufDurationToBigqueryRecord() { - String fieldName = "duration_value"; - Descriptors.FieldDescriptor fieldDescriptor = testMessageDescriptor.findFieldByName(fieldName); - BQField bqField = fieldDescriptorToBQField(fieldDescriptor); - LegacySQLTypeName bqFieldType = bqField.getType(); - - assertEquals(LegacySQLTypeName.RECORD, bqFieldType); - } - - - @Test - public void shouldConvertProtobufDoubleToBigqueryFloat() { - String fieldName = "double_value"; - Descriptors.FieldDescriptor fieldDescriptor = testMessageDescriptor.findFieldByName(fieldName); - BQField expected = new BQField(fieldName, Field.Mode.NULLABLE, LegacySQLTypeName.FLOAT, new ArrayList<>()); - BQField result = fieldDescriptorToBQField(fieldDescriptor); - - assertEquals(expected, result); - } - - @Test - public void shouldConvertProtobufFloatToBigqueryFloat() { - String fieldName = "float_value"; - Descriptors.FieldDescriptor fieldDescriptor = testMessageDescriptor.findFieldByName(fieldName); - BQField expected = new BQField(fieldName, Field.Mode.NULLABLE, LegacySQLTypeName.FLOAT, new ArrayList<>()); - BQField result = fieldDescriptorToBQField(fieldDescriptor); - - assertEquals(expected, result); - } - - @Test - public void shouldConvertProtobufBytesToBigqueryBytes() { - String fieldName = "bytes_value"; - Descriptors.FieldDescriptor fieldDescriptor = testMessageDescriptor.findFieldByName(fieldName); - BQField expected = new BQField(fieldName, Field.Mode.NULLABLE, LegacySQLTypeName.BYTES, new ArrayList<>()); - BQField result = fieldDescriptorToBQField(fieldDescriptor); - - assertEquals(expected, result); - } - - @Test - public void 
shouldConvertProtobufBoolToBigqueryBool() { - String fieldName = "bool_value"; - Descriptors.FieldDescriptor fieldDescriptor = testMessageDescriptor.findFieldByName(fieldName); - BQField expected = new BQField(fieldName, Field.Mode.NULLABLE, LegacySQLTypeName.BOOLEAN, new ArrayList<>()); - BQField result = fieldDescriptorToBQField(fieldDescriptor); - - assertEquals(expected, result); - } - - @Test - public void shouldConvertProtobufEnumToBigqueryString() { - String fieldName = "enum_value"; - Descriptors.FieldDescriptor fieldDescriptor = testMessageDescriptor.findFieldByName(fieldName); - BQField expected = new BQField(fieldName, Field.Mode.NULLABLE, LegacySQLTypeName.STRING, new ArrayList<>()); - BQField result = fieldDescriptorToBQField(fieldDescriptor); - - assertEquals(expected, result); - } - - @Test - public void shouldConvertProtobufStringToBigqueryString() { - String fieldName = "string_value"; - Descriptors.FieldDescriptor fieldDescriptor = testMessageDescriptor.findFieldByName(fieldName); - BQField expected = new BQField(fieldName, Field.Mode.NULLABLE, LegacySQLTypeName.STRING, new ArrayList<>()); - BQField result = fieldDescriptorToBQField(fieldDescriptor); - - assertEquals(expected, result); - } - - @Test - public void shouldConvertProtobufInt64ToBigqueryInteger() { - String fieldName = "int64_value"; - Descriptors.FieldDescriptor fieldDescriptor = testMessageDescriptor.findFieldByName(fieldName); - BQField expected = new BQField(fieldName, Field.Mode.NULLABLE, LegacySQLTypeName.INTEGER, new ArrayList<>()); - BQField result = fieldDescriptorToBQField(fieldDescriptor); - - assertEquals(expected, result); - } - - @Test - public void shouldConvertProtobufUint64ToBigqueryInteger() { - String fieldName = "uint64_value"; - Descriptors.FieldDescriptor fieldDescriptor = testMessageDescriptor.findFieldByName(fieldName); - BQField expected = new BQField(fieldName, Field.Mode.NULLABLE, LegacySQLTypeName.INTEGER, new ArrayList<>()); - BQField result = fieldDescriptorToBQField(fieldDescriptor); - - assertEquals(expected, result); - } - - @Test - public void shouldConvertProtobufInt32ToBigqueryInteger() { - String fieldName = "int32_value"; - Descriptors.FieldDescriptor fieldDescriptor = testMessageDescriptor.findFieldByName(fieldName); - - BQField expected = new BQField(fieldName, Field.Mode.NULLABLE, LegacySQLTypeName.INTEGER, new ArrayList<>()); - BQField result = fieldDescriptorToBQField(fieldDescriptor); - - assertEquals(expected, result); - } - - @Test - public void shouldConvertProtobufUint32ToBigqueryInteger() { - String fieldName = "uint32_value"; - Descriptors.FieldDescriptor fieldDescriptor = testMessageDescriptor.findFieldByName(fieldName); - BQField expected = new BQField(fieldName, Field.Mode.NULLABLE, LegacySQLTypeName.INTEGER, new ArrayList<>()); - BQField result = fieldDescriptorToBQField(fieldDescriptor); - - assertEquals(expected, result); - } - - @Test - public void shouldConvertProtobufFixed32ToBigqueryInteger() { - String fieldName = "fixed32_value"; - Descriptors.FieldDescriptor fieldDescriptor = testMessageDescriptor.findFieldByName(fieldName); - BQField expected = new BQField(fieldName, Field.Mode.NULLABLE, LegacySQLTypeName.INTEGER, new ArrayList<>()); - BQField result = fieldDescriptorToBQField(fieldDescriptor); - - assertEquals(expected, result); - } - - @Test - public void shouldConvertProtobufFixed64ToBigqueryInteger() { - String fieldName = "fixed64_value"; - Descriptors.FieldDescriptor fieldDescriptor = testMessageDescriptor.findFieldByName(fieldName); - 
BQField expected = new BQField(fieldName, Field.Mode.NULLABLE, LegacySQLTypeName.INTEGER, new ArrayList<>()); - BQField result = fieldDescriptorToBQField(fieldDescriptor); - - assertEquals(expected, result); - } - - @Test - public void shouldConvertProtobufSfixed32ToBigqueryInteger() { - String fieldName = "sfixed32_value"; - Descriptors.FieldDescriptor fieldDescriptor = testMessageDescriptor.findFieldByName(fieldName); - BQField expected = new BQField(fieldName, Field.Mode.NULLABLE, LegacySQLTypeName.INTEGER, new ArrayList<>()); - BQField result = fieldDescriptorToBQField(fieldDescriptor); - - assertEquals(expected, result); - } - - @Test - public void shouldConvertProtobufSfixed64ToBigqueryInteger() { - String fieldName = "sfixed32_value"; - Descriptors.FieldDescriptor fieldDescriptor = testMessageDescriptor.findFieldByName(fieldName); - BQField expected = new BQField(fieldName, Field.Mode.NULLABLE, LegacySQLTypeName.INTEGER, new ArrayList<>()); - BQField result = fieldDescriptorToBQField(fieldDescriptor); - - assertEquals(expected, result); - } - - @Test - public void shouldConvertProtobufSint32ToBigqueryInteger() { - String fieldName = "sint32_value"; - Descriptors.FieldDescriptor fieldDescriptor = testMessageDescriptor.findFieldByName(fieldName); - BQField expected = new BQField(fieldName, Field.Mode.NULLABLE, LegacySQLTypeName.INTEGER, new ArrayList<>()); - BQField result = fieldDescriptorToBQField(fieldDescriptor); - - assertEquals(expected, result); - } - - @Test - public void shouldConvertProtobufSint64ToBigqueryInteger() { - String fieldName = "sint64_value"; - Descriptors.FieldDescriptor fieldDescriptor = testMessageDescriptor.findFieldByName(fieldName); - BQField expected = new BQField(fieldName, Field.Mode.NULLABLE, LegacySQLTypeName.INTEGER, new ArrayList<>()); - BQField result = fieldDescriptorToBQField(fieldDescriptor); - - assertEquals(expected, result); - } - - @Test - public void shouldConvertProtobufMessageTypeToBigqueryRecord() { - String fieldName = "message_value"; - Descriptors.FieldDescriptor fieldDescriptor = testMessageDescriptor.findFieldByName(fieldName); - BQField expected = new BQField(fieldName, Field.Mode.NULLABLE, LegacySQLTypeName.RECORD, new ArrayList<>()); - BQField result = fieldDescriptorToBQField(fieldDescriptor); - - assertEquals(expected, result); - } - - @Test - public void shouldConvertProtobufRepeatedModifierToBigqueryRepeatedColumn() { - String fieldName = "list_values"; - Descriptors.FieldDescriptor fieldDescriptor = testMessageDescriptor.findFieldByName(fieldName); - BQField expected = new BQField(fieldName, Field.Mode.REPEATED, LegacySQLTypeName.STRING, new ArrayList<>()); - BQField result = fieldDescriptorToBQField(fieldDescriptor); - - assertEquals(expected, result); - } - - @Test - public void shouldConvertProtobufRepeatedMessageModifierToBigqueryRepeatedRecordColumn() { - String fieldName = "list_message_values"; - Descriptors.FieldDescriptor fieldDescriptor = testMessageDescriptor.findFieldByName(fieldName); - BQField expected = new BQField(fieldName, Field.Mode.REPEATED, LegacySQLTypeName.RECORD, new ArrayList<>()); - BQField result = fieldDescriptorToBQField(fieldDescriptor); - - assertEquals(expected, result); - } - - private Field fieldDescriptorToField(Descriptors.FieldDescriptor fieldDescriptor) { - BQField bqField = fieldDescriptorToBQField(fieldDescriptor); - return bqField.getField(); - } - - private BQField fieldDescriptorToBQField(Descriptors.FieldDescriptor fieldDescriptor) { - ProtoField protoField = new 
ProtoField(fieldDescriptor.toProto()); - return new BQField(protoField); - } - -} diff --git a/src/test/java/io/odpf/firehose/sink/bigquery/models/ProtoFieldTest.java b/src/test/java/io/odpf/firehose/sink/bigquery/models/ProtoFieldTest.java deleted file mode 100644 index a04e34349..000000000 --- a/src/test/java/io/odpf/firehose/sink/bigquery/models/ProtoFieldTest.java +++ /dev/null @@ -1,72 +0,0 @@ -package io.odpf.firehose.sink.bigquery.models; - -import com.google.protobuf.DescriptorProtos; -import com.google.protobuf.Descriptors; -import io.odpf.firehose.consumer.TestTypesMessage; -import org.junit.Test; - -import java.util.List; -import java.util.stream.Collectors; - -import static org.junit.Assert.*; - -public class ProtoFieldTest { - @Test - public void shouldReturnNestedAsTrueWhenProtobufFieldTypeIsAMessage() { - DescriptorProtos.FieldDescriptorProto fieldDescriptorProto = TestTypesMessage.getDescriptor().findFieldByName("duration_value").toProto(); - ProtoField protoField = new ProtoField(fieldDescriptorProto); - - assertTrue(protoField.isNested()); - } - - @Test - public void shouldReturnNestedAsFalseWhenProtobufFieldTypeIsTimestamp() { - DescriptorProtos.FieldDescriptorProto fieldDescriptorProto = TestTypesMessage.getDescriptor().findFieldByName("timestamp_value").toProto(); - ProtoField protoField = new ProtoField(fieldDescriptorProto); - - assertFalse(protoField.isNested()); - } - - @Test - public void shouldReturnNestedAsFalseWhenProtobufFieldTypeIsStruct() { - DescriptorProtos.FieldDescriptorProto fieldDescriptorProto = TestTypesMessage.getDescriptor().findFieldByName("struct_value").toProto(); - ProtoField protoField = new ProtoField(fieldDescriptorProto); - - assertFalse(protoField.isNested()); - } - - @Test - public void shouldReturnNestedAsFalseWhenProtobufFieldIsScalarValueTypes() { - DescriptorProtos.FieldDescriptorProto fieldDescriptorProto = TestTypesMessage.getDescriptor().findFieldByName("timestamp_value").toProto(); - ProtoField protoField = new ProtoField(fieldDescriptorProto); - - assertFalse(protoField.isNested()); - } - - @Test - public void shouldReturnProtoFieldString() { - Descriptors.FieldDescriptor fieldDescriptor = TestTypesMessage.getDescriptor().findFieldByName("message_value"); - DescriptorProtos.FieldDescriptorProto fieldDescriptorProto = fieldDescriptor.toProto(); - ProtoField protoField = new ProtoField(fieldDescriptorProto); - - List childFields = fieldDescriptor.getMessageType().getFields(); - List fieldList = childFields.stream().map(fd -> new ProtoField(fd.toProto())).collect(Collectors.toList()); - fieldList.forEach(pf -> - protoField.addField(pf)); - - String protoString = protoField.toString(); - - assertEquals("{name='message_value', type=TYPE_MESSAGE, len=3, nested=[" - + "{name='order_number', type=TYPE_STRING, len=0, nested=[]}, " - + "{name='order_url', type=TYPE_STRING, len=0, nested=[]}, " - + "{name='order_details', type=TYPE_STRING, len=0, nested=[]}]}", protoString); - } - - - @Test - public void shouldReturnEmptyProtoFieldString() { - String protoString = new ProtoField().toString(); - - assertEquals("{name='null', type=null, len=0, nested=[]}", protoString); - } -} diff --git a/src/test/java/io/odpf/firehose/sink/bigquery/models/RecordTest.java b/src/test/java/io/odpf/firehose/sink/bigquery/models/RecordTest.java deleted file mode 100644 index 8b703b303..000000000 --- a/src/test/java/io/odpf/firehose/sink/bigquery/models/RecordTest.java +++ /dev/null @@ -1,30 +0,0 @@ -package io.odpf.firehose.sink.bigquery.models; - -import 
io.odpf.firehose.message.Message; -import org.junit.Test; - -import java.nio.charset.StandardCharsets; - -import static org.junit.Assert.*; - -public class RecordTest { - - @Test - public void shouldReturnID() { - Message message = new Message("123".getBytes(StandardCharsets.UTF_8), "abc".getBytes(StandardCharsets.UTF_8), "default", 1, 1); - Record record = new Record(message, null); - String id = record.getId(); - - assertEquals("default_1_1", id); - } - - @Test - public void shouldReturnSize() { - Message message = new Message("123".getBytes(StandardCharsets.UTF_8), "abc".getBytes(StandardCharsets.UTF_8), "default", 1, 1); - Record record = new Record(message, null); - - long size = record.getSize(); - - assertEquals(6, size); - } -} diff --git a/src/test/java/io/odpf/firehose/sink/bigquery/proto/ProtoFieldParserTest.java b/src/test/java/io/odpf/firehose/sink/bigquery/proto/ProtoFieldParserTest.java deleted file mode 100644 index 8bc3b1d75..000000000 --- a/src/test/java/io/odpf/firehose/sink/bigquery/proto/ProtoFieldParserTest.java +++ /dev/null @@ -1,181 +0,0 @@ -package io.odpf.firehose.sink.bigquery.proto; - -import com.google.protobuf.DescriptorProtos; -import com.google.protobuf.Descriptors; -import com.google.protobuf.Duration; -import com.google.protobuf.Struct; -import com.google.protobuf.Timestamp; -import com.google.type.Date; -import io.odpf.firehose.TestMessageBQ; -import io.odpf.firehose.TestNestedMessageBQ; -import io.odpf.firehose.TestRecursiveMessageBQ; -import io.odpf.firehose.sink.bigquery.models.ProtoField; -import org.junit.Before; -import org.junit.Test; -import org.junit.runner.RunWith; -import org.mockito.runners.MockitoJUnitRunner; - -import java.util.ArrayList; -import java.util.HashMap; -import java.util.List; -import java.util.Map; - -import static org.junit.Assert.assertEquals; - -@RunWith(MockitoJUnitRunner.class) -public class ProtoFieldParserTest { - private ProtoFieldParser protoMappingParser; - - @Before - public void setup() { - this.protoMappingParser = new ProtoFieldParser(); - } - - @Test(expected = RuntimeException.class) - public void shouldThrowExceptionIfProtoNotFound() { - protoMappingParser.parseFields(null, "test", new HashMap<>(), new HashMap<>()); - } - - @Test(expected = RuntimeException.class) - public void shouldThrowExceptionIfNestedProtoNotFound() { - Map descriptorMap = new HashMap() {{ - put("io.odpf.firehose.TestMessageBQ", TestMessageBQ.getDescriptor()); - }}; - ProtoField protoField = new ProtoField(); - protoMappingParser.parseFields(protoField, "io.odpf.firehose.TestNestedMessageBQ", descriptorMap, new HashMap<>()); - } - - @Test - public void shouldParseProtoSchemaForNonNestedFields() { - ArrayList fileDescriptors = new ArrayList<>(); - - fileDescriptors.add(TestMessageBQ.getDescriptor().getFile()); - fileDescriptors.add(Duration.getDescriptor().getFile()); - fileDescriptors.add(Date.getDescriptor().getFile()); - fileDescriptors.add(Struct.getDescriptor().getFile()); - fileDescriptors.add(Timestamp.getDescriptor().getFile()); - - Map descriptorMap = getDescriptors(fileDescriptors); - - Map typeNameToPackageNameMap = new HashMap() {{ - put(".odpf.firehose.TestMessageBQ.CurrentStateEntry", "io.odpf.firehose.TestMessageBQ.CurrentStateEntry"); - put(".google.protobuf.Struct.FieldsEntry", "com.google.protobuf.Struct.FieldsEntry"); - put(".google.protobuf.Duration", "com.google.protobuf.Duration"); - put(".google.type.Date", "com.google.type.Date"); - }}; - - ProtoField protoField = new ProtoField(); - protoField = 
protoMappingParser.parseFields(protoField, "io.odpf.firehose.TestMessageBQ", descriptorMap, typeNameToPackageNameMap); - assertTestMessage(protoField.getFields()); - } - - @Test - public void shouldParseProtoSchemaForRecursiveFieldTillMaxLevel() { - ArrayList fileDescriptors = new ArrayList<>(); - - fileDescriptors.add(TestRecursiveMessageBQ.getDescriptor().getFile()); - - Map descriptorMap = getDescriptors(fileDescriptors); - - Map typeNameToPackageNameMap = new HashMap() {{ - put(".odpf.firehose.TestRecursiveMessageBQ", "io.odpf.firehose.TestRecursiveMessageBQ"); - }}; - - ProtoField protoField = new ProtoField(); - protoField = protoMappingParser.parseFields(protoField, "io.odpf.firehose.TestRecursiveMessageBQ", descriptorMap, typeNameToPackageNameMap); - assertField(protoField.getFields().get(0), "string_value", DescriptorProtos.FieldDescriptorProto.Type.TYPE_STRING, DescriptorProtos.FieldDescriptorProto.Label.LABEL_OPTIONAL, 1); - assertField(protoField.getFields().get(1), "float_value", DescriptorProtos.FieldDescriptorProto.Type.TYPE_FLOAT, DescriptorProtos.FieldDescriptorProto.Label.LABEL_OPTIONAL, 2); - - ProtoField recursiveField = protoField; - int totalLevel = 1; - while (recursiveField.getFields().size() == 3) { - assertField(protoField.getFields().get(0), "string_value", DescriptorProtos.FieldDescriptorProto.Type.TYPE_STRING, DescriptorProtos.FieldDescriptorProto.Label.LABEL_OPTIONAL, 1); - assertField(protoField.getFields().get(1), "float_value", DescriptorProtos.FieldDescriptorProto.Type.TYPE_FLOAT, DescriptorProtos.FieldDescriptorProto.Label.LABEL_OPTIONAL, 2); - recursiveField = recursiveField.getFields().get(2); - totalLevel++; - } - assertEquals(15, totalLevel); - } - - @Test - public void shouldParseProtoSchemaForNestedFields() { - ArrayList fileDescriptors = new ArrayList<>(); - - fileDescriptors.add(TestMessageBQ.getDescriptor().getFile()); - fileDescriptors.add(Duration.getDescriptor().getFile()); - fileDescriptors.add(Date.getDescriptor().getFile()); - fileDescriptors.add(Struct.getDescriptor().getFile()); - fileDescriptors.add(TestNestedMessageBQ.getDescriptor().getFile()); - - Map descriptorMap = getDescriptors(fileDescriptors); - - Map typeNameToPackageNameMap = new HashMap() {{ - put(".odpf.firehose.TestMessageBQ.CurrentStateEntry", "io.odpf.firehose.TestMessageBQ.CurrentStateEntry"); - put(".google.protobuf.Struct.FieldsEntry", "com.google.protobuf.Struct.FieldsEntry"); - put(".google.protobuf.Duration", "com.google.protobuf.Duration"); - put(".google.type.Date", "com.google.type.Date"); - put(".odpf.firehose.TestMessageBQ", "io.odpf.firehose.TestMessageBQ"); - }}; - - ProtoField protoField = new ProtoField(); - protoField = protoMappingParser.parseFields(protoField, "io.odpf.firehose.TestNestedMessageBQ", descriptorMap, typeNameToPackageNameMap); - assertField(protoField.getFields().get(0), "nested_id", DescriptorProtos.FieldDescriptorProto.Type.TYPE_STRING, DescriptorProtos.FieldDescriptorProto.Label.LABEL_OPTIONAL, 1); - assertField(protoField.getFields().get(1), "single_message", DescriptorProtos.FieldDescriptorProto.Type.TYPE_MESSAGE, DescriptorProtos.FieldDescriptorProto.Label.LABEL_OPTIONAL, 2); - - assertTestMessage(protoField.getFields().get(1).getFields()); - } - - private Map getDescriptors(ArrayList fileDescriptors) { - Map descriptorMap = new HashMap<>(); - fileDescriptors.forEach(fd -> { - String javaPackage = fd.getOptions().getJavaPackage(); - fd.getMessageTypes().forEach(desc -> { - String className = desc.getName(); - 
desc.getNestedTypes().forEach(nestedDesc -> { - String nestedClassName = nestedDesc.getName(); - descriptorMap.put(String.format("%s.%s.%s", javaPackage, className, nestedClassName), nestedDesc); - }); - descriptorMap.put(String.format("%s.%s", javaPackage, className), desc); - }); - }); - return descriptorMap; - } - - private void assertTestMessage(List fields) { - assertEquals(16, fields.size()); - assertField(fields.get(0), "order_number", DescriptorProtos.FieldDescriptorProto.Type.TYPE_STRING, DescriptorProtos.FieldDescriptorProto.Label.LABEL_OPTIONAL, 1); - assertField(fields.get(1), "order_url", DescriptorProtos.FieldDescriptorProto.Type.TYPE_STRING, DescriptorProtos.FieldDescriptorProto.Label.LABEL_OPTIONAL, 2); - assertField(fields.get(2), "order_details", DescriptorProtos.FieldDescriptorProto.Type.TYPE_STRING, DescriptorProtos.FieldDescriptorProto.Label.LABEL_OPTIONAL, 3); - assertField(fields.get(3), "created_at", DescriptorProtos.FieldDescriptorProto.Type.TYPE_MESSAGE, DescriptorProtos.FieldDescriptorProto.Label.LABEL_OPTIONAL, 4); - assertField(fields.get(4), "status", DescriptorProtos.FieldDescriptorProto.Type.TYPE_ENUM, DescriptorProtos.FieldDescriptorProto.Label.LABEL_OPTIONAL, 5); - assertField(fields.get(5), "discount", DescriptorProtos.FieldDescriptorProto.Type.TYPE_INT64, DescriptorProtos.FieldDescriptorProto.Label.LABEL_OPTIONAL, 6); - assertField(fields.get(6), "success", DescriptorProtos.FieldDescriptorProto.Type.TYPE_BOOL, DescriptorProtos.FieldDescriptorProto.Label.LABEL_OPTIONAL, 7); - assertField(fields.get(7), "price", DescriptorProtos.FieldDescriptorProto.Type.TYPE_FLOAT, DescriptorProtos.FieldDescriptorProto.Label.LABEL_OPTIONAL, 8); - assertField(fields.get(8), "current_state", DescriptorProtos.FieldDescriptorProto.Type.TYPE_MESSAGE, DescriptorProtos.FieldDescriptorProto.Label.LABEL_REPEATED, 9); - assertField(fields.get(9), "user_token", DescriptorProtos.FieldDescriptorProto.Type.TYPE_BYTES, DescriptorProtos.FieldDescriptorProto.Label.LABEL_OPTIONAL, 10); - assertField(fields.get(10), "trip_duration", DescriptorProtos.FieldDescriptorProto.Type.TYPE_MESSAGE, DescriptorProtos.FieldDescriptorProto.Label.LABEL_OPTIONAL, 11); - assertField(fields.get(11), "aliases", DescriptorProtos.FieldDescriptorProto.Type.TYPE_STRING, DescriptorProtos.FieldDescriptorProto.Label.LABEL_REPEATED, 12); - assertField(fields.get(12), "properties", DescriptorProtos.FieldDescriptorProto.Type.TYPE_MESSAGE, DescriptorProtos.FieldDescriptorProto.Label.LABEL_OPTIONAL, 13); - assertField(fields.get(13), "order_date", DescriptorProtos.FieldDescriptorProto.Type.TYPE_MESSAGE, DescriptorProtos.FieldDescriptorProto.Label.LABEL_OPTIONAL, 14); - assertField(fields.get(14), "updated_at", DescriptorProtos.FieldDescriptorProto.Type.TYPE_MESSAGE, DescriptorProtos.FieldDescriptorProto.Label.LABEL_REPEATED, 15); - assertField(fields.get(15), "attributes", DescriptorProtos.FieldDescriptorProto.Type.TYPE_MESSAGE, DescriptorProtos.FieldDescriptorProto.Label.LABEL_REPEATED, 16); - - assertEquals(String.format(".%s", Duration.getDescriptor().getFullName()), fields.get(10).getTypeName()); - assertEquals(2, fields.get(10).getFields().size()); - assertField(fields.get(10).getFields().get(0), "seconds", DescriptorProtos.FieldDescriptorProto.Type.TYPE_INT64, DescriptorProtos.FieldDescriptorProto.Label.LABEL_OPTIONAL, 1); - assertField(fields.get(10).getFields().get(1), "nanos", DescriptorProtos.FieldDescriptorProto.Type.TYPE_INT32, DescriptorProtos.FieldDescriptorProto.Label.LABEL_OPTIONAL, 2); - - 
assertEquals(String.format(".%s", Date.getDescriptor().getFullName()), fields.get(13).getTypeName()); - assertEquals(3, fields.get(13).getFields().size()); - assertField(fields.get(13).getFields().get(0), "year", DescriptorProtos.FieldDescriptorProto.Type.TYPE_INT32, DescriptorProtos.FieldDescriptorProto.Label.LABEL_OPTIONAL, 1); - assertField(fields.get(13).getFields().get(1), "month", DescriptorProtos.FieldDescriptorProto.Type.TYPE_INT32, DescriptorProtos.FieldDescriptorProto.Label.LABEL_OPTIONAL, 2); - assertField(fields.get(13).getFields().get(2), "day", DescriptorProtos.FieldDescriptorProto.Type.TYPE_INT32, DescriptorProtos.FieldDescriptorProto.Label.LABEL_OPTIONAL, 3); - } - - private void assertField(ProtoField field, String name, DescriptorProtos.FieldDescriptorProto.Type ftype, DescriptorProtos.FieldDescriptorProto.Label flabel, int index) { - assertEquals(name, field.getName()); - assertEquals(ftype, field.getType()); - assertEquals(flabel, field.getLabel()); - assertEquals(index, field.getIndex()); - } -} diff --git a/src/test/java/io/odpf/firehose/sink/bigquery/proto/ProtoMapperTest.java b/src/test/java/io/odpf/firehose/sink/bigquery/proto/ProtoMapperTest.java deleted file mode 100644 index 4cc90439c..000000000 --- a/src/test/java/io/odpf/firehose/sink/bigquery/proto/ProtoMapperTest.java +++ /dev/null @@ -1,355 +0,0 @@ -package io.odpf.firehose.sink.bigquery.proto; - -import com.fasterxml.jackson.databind.ObjectMapper; -import com.fasterxml.jackson.databind.node.JsonNodeFactory; -import com.fasterxml.jackson.databind.node.ObjectNode; -import com.google.cloud.bigquery.Field; -import com.google.cloud.bigquery.LegacySQLTypeName; -import com.google.protobuf.DescriptorProtos; -import io.odpf.firehose.sink.bigquery.models.Constants; -import io.odpf.firehose.sink.bigquery.models.ProtoField; -import org.junit.Test; - -import java.io.IOException; -import java.util.ArrayList; -import java.util.HashMap; -import java.util.List; -import java.util.Map; -import java.util.stream.Collectors; -import java.util.stream.IntStream; - -import static org.junit.Assert.assertEquals; -import static org.junit.Assert.assertNull; - -public class ProtoMapperTest { - - private final ProtoMapper protoMapper = new ProtoMapper(); - private final ObjectMapper objectMapper = new ObjectMapper(); - - private final Map expectedType = new HashMap() {{ - put(DescriptorProtos.FieldDescriptorProto.Type.TYPE_BYTES, LegacySQLTypeName.BYTES); - put(DescriptorProtos.FieldDescriptorProto.Type.TYPE_STRING, LegacySQLTypeName.STRING); - put(DescriptorProtos.FieldDescriptorProto.Type.TYPE_ENUM, LegacySQLTypeName.STRING); - put(DescriptorProtos.FieldDescriptorProto.Type.TYPE_BOOL, LegacySQLTypeName.BOOLEAN); - put(DescriptorProtos.FieldDescriptorProto.Type.TYPE_DOUBLE, LegacySQLTypeName.FLOAT); - put(DescriptorProtos.FieldDescriptorProto.Type.TYPE_FLOAT, LegacySQLTypeName.FLOAT); - }}; - - - @Test - public void shouldTestShouldCreateFirstLevelColumnMappingSuccessfully() throws IOException { - ProtoField protoField = ProtoUtil.createProtoField(new ArrayList() {{ - add(ProtoUtil.createProtoField("order_number", 1)); - add(ProtoUtil.createProtoField("order_url", 2)); - add(ProtoUtil.createProtoField("order_details", 3)); - add(ProtoUtil.createProtoField("created_at", 4)); - add(ProtoUtil.createProtoField("status", 5)); - }}); - - ObjectNode objNode = JsonNodeFactory.instance.objectNode(); - objNode.put("1", "order_number"); - objNode.put("2", "order_url"); - objNode.put("3", "order_details"); - objNode.put("4", "created_at"); - 
objNode.put("5", "status"); - - String columnMapping = protoMapper.generateColumnMappings(protoField.getFields()); - - String expectedProtoMapping = objectMapper.writeValueAsString(objNode); - assertEquals(expectedProtoMapping, columnMapping); - } - - @Test - public void shouldTestShouldCreateNestedMapping() throws IOException { - ProtoField protoField = ProtoUtil.createProtoField(new ArrayList() {{ - add(ProtoUtil.createProtoField("order_number", 1)); - add(ProtoUtil.createProtoField("order_url", "some.type.name", DescriptorProtos.FieldDescriptorProto.Type.TYPE_MESSAGE, 2, new ArrayList() {{ - add(ProtoUtil.createProtoField("host", 1)); - add(ProtoUtil.createProtoField("url", 2)); - }})); - add(ProtoUtil.createProtoField("order_details", 3)); - }}); - - ObjectNode objNode = JsonNodeFactory.instance.objectNode(); - ObjectNode innerObjNode = JsonNodeFactory.instance.objectNode(); - innerObjNode.put("1", "host"); - innerObjNode.put("2", "url"); - innerObjNode.put("record_name", "order_url"); - objNode.put("1", "order_number"); - objNode.put("2", innerObjNode); - objNode.put("3", "order_details"); - - - String columnMapping = protoMapper.generateColumnMappings(protoField.getFields()); - String expectedProtoMapping = objectMapper.writeValueAsString(objNode); - assertEquals(expectedProtoMapping, columnMapping); - } - - @Test - public void generateColumnMappingsForNoFields() throws IOException { - String protoMapping = protoMapper.generateColumnMappings(new ArrayList<>()); - assertEquals(protoMapping, "{}"); - } - - @Test - public void shouldTestConvertToSchemaSuccessful() { - List nestedBQFields = new ArrayList<>(); - nestedBQFields.add(ProtoUtil.createProtoField("field0_bytes", DescriptorProtos.FieldDescriptorProto.Type.TYPE_BYTES, DescriptorProtos.FieldDescriptorProto.Label.LABEL_OPTIONAL)); - nestedBQFields.add(ProtoUtil.createProtoField("field1_string", DescriptorProtos.FieldDescriptorProto.Type.TYPE_STRING, DescriptorProtos.FieldDescriptorProto.Label.LABEL_OPTIONAL)); - nestedBQFields.add(ProtoUtil.createProtoField("field2_bool", DescriptorProtos.FieldDescriptorProto.Type.TYPE_BOOL, DescriptorProtos.FieldDescriptorProto.Label.LABEL_OPTIONAL)); - nestedBQFields.add(ProtoUtil.createProtoField("field3_enum", DescriptorProtos.FieldDescriptorProto.Type.TYPE_ENUM, DescriptorProtos.FieldDescriptorProto.Label.LABEL_OPTIONAL)); - nestedBQFields.add(ProtoUtil.createProtoField("field4_double", DescriptorProtos.FieldDescriptorProto.Type.TYPE_DOUBLE, DescriptorProtos.FieldDescriptorProto.Label.LABEL_OPTIONAL)); - nestedBQFields.add(ProtoUtil.createProtoField("field5_float", DescriptorProtos.FieldDescriptorProto.Type.TYPE_FLOAT, DescriptorProtos.FieldDescriptorProto.Label.LABEL_OPTIONAL)); - - - List fields = protoMapper.generateBigquerySchema(ProtoUtil.createProtoField(nestedBQFields)); - assertEquals(nestedBQFields.size(), fields.size()); - IntStream.range(0, nestedBQFields.size()) - .forEach(index -> { - assertEquals(Field.Mode.NULLABLE, fields.get(index).getMode()); - assertEquals(nestedBQFields.get(index).getName(), fields.get(index).getName()); - assertEquals(expectedType.get(nestedBQFields.get(index).getType()), fields.get(index).getType()); - }); - - } - - @Test - public void shouldTestConverterToSchemaForNullFields() { - List fields = protoMapper.generateBigquerySchema(null); - assertNull(fields); - } - - @Test - public void shouldTestShouldConvertIntegerDataTypes() { - List allIntTypes = new ArrayList() {{ - add(DescriptorProtos.FieldDescriptorProto.Type.TYPE_INT64); - 
add(DescriptorProtos.FieldDescriptorProto.Type.TYPE_UINT64); - add(DescriptorProtos.FieldDescriptorProto.Type.TYPE_INT32); - add(DescriptorProtos.FieldDescriptorProto.Type.TYPE_UINT32); - add(DescriptorProtos.FieldDescriptorProto.Type.TYPE_FIXED64); - add(DescriptorProtos.FieldDescriptorProto.Type.TYPE_FIXED32); - add(DescriptorProtos.FieldDescriptorProto.Type.TYPE_SFIXED32); - add(DescriptorProtos.FieldDescriptorProto.Type.TYPE_SFIXED64); - add(DescriptorProtos.FieldDescriptorProto.Type.TYPE_SINT32); - add(DescriptorProtos.FieldDescriptorProto.Type.TYPE_SINT64); - }}; - - List nestedBQFields = IntStream.range(0, allIntTypes.size()) - .mapToObj(index -> ProtoUtil.createProtoField("field-" + index, allIntTypes.get(index), DescriptorProtos.FieldDescriptorProto.Label.LABEL_OPTIONAL)) - .collect(Collectors.toList()); - - - List fields = protoMapper.generateBigquerySchema(ProtoUtil.createProtoField(nestedBQFields)); - assertEquals(nestedBQFields.size(), fields.size()); - IntStream.range(0, nestedBQFields.size()) - .forEach(index -> { - assertEquals(Field.Mode.NULLABLE, fields.get(index).getMode()); - assertEquals(nestedBQFields.get(index).getName(), fields.get(index).getName()); - assertEquals(LegacySQLTypeName.INTEGER, fields.get(index).getType()); - }); - } - - @Test - public void shouldTestShouldConvertNestedField() { - List nestedBQFields = new ArrayList<>(); - nestedBQFields.add(ProtoUtil.createProtoField("field1_level2_nested", DescriptorProtos.FieldDescriptorProto.Type.TYPE_STRING, DescriptorProtos.FieldDescriptorProto.Label.LABEL_OPTIONAL)); - nestedBQFields.add(ProtoUtil.createProtoField("field2_level2_nested", DescriptorProtos.FieldDescriptorProto.Type.TYPE_STRING, DescriptorProtos.FieldDescriptorProto.Label.LABEL_OPTIONAL)); - - ProtoField protoField = ProtoUtil.createProtoField(new ArrayList() {{ - add(ProtoUtil.createProtoField("field1_level1", - DescriptorProtos.FieldDescriptorProto.Type.TYPE_STRING, - DescriptorProtos.FieldDescriptorProto.Label.LABEL_OPTIONAL)); - add(ProtoUtil.createProtoField("field2_level1_message", - "some.type.name", - DescriptorProtos.FieldDescriptorProto.Type.TYPE_MESSAGE, - DescriptorProtos.FieldDescriptorProto.Label.LABEL_OPTIONAL, - nestedBQFields)); - }}); - - - List fields = protoMapper.generateBigquerySchema(protoField); - - assertEquals(protoField.getFields().size(), fields.size()); - assertEquals(nestedBQFields.size(), fields.get(1).getSubFields().size()); - - assertBqField(protoField.getFields().get(0).getName(), LegacySQLTypeName.STRING, Field.Mode.NULLABLE, fields.get(0)); - assertBqField(protoField.getFields().get(1).getName(), LegacySQLTypeName.RECORD, Field.Mode.NULLABLE, fields.get(1)); - assertBqField(nestedBQFields.get(0).getName(), LegacySQLTypeName.STRING, Field.Mode.NULLABLE, fields.get(1).getSubFields().get(0)); - assertBqField(nestedBQFields.get(1).getName(), LegacySQLTypeName.STRING, Field.Mode.NULLABLE, fields.get(1).getSubFields().get(1)); - - } - - @Test - public void shouldTestShouldConvertMultiNestedFields() { - List nestedBQFields = new ArrayList() {{ - add(ProtoUtil.createProtoField("field1_level3_nested", - DescriptorProtos.FieldDescriptorProto.Type.TYPE_STRING, - DescriptorProtos.FieldDescriptorProto.Label.LABEL_OPTIONAL)); - add(ProtoUtil.createProtoField("field2_level3_nested", - DescriptorProtos.FieldDescriptorProto.Type.TYPE_STRING, - DescriptorProtos.FieldDescriptorProto.Label.LABEL_OPTIONAL)); - }}; - - ProtoField protoField = ProtoUtil.createProtoField(new ArrayList() {{ - 
add(ProtoUtil.createProtoField("field1_level1", - DescriptorProtos.FieldDescriptorProto.Type.TYPE_STRING, - DescriptorProtos.FieldDescriptorProto.Label.LABEL_OPTIONAL)); - - add(ProtoUtil.createProtoField( - "field2_level1_message", - "some.type.name", - DescriptorProtos.FieldDescriptorProto.Type.TYPE_MESSAGE, - DescriptorProtos.FieldDescriptorProto.Label.LABEL_OPTIONAL, - new ArrayList() {{ - add(ProtoUtil.createProtoField( - "field1_level2", - DescriptorProtos.FieldDescriptorProto.Type.TYPE_STRING, - DescriptorProtos.FieldDescriptorProto.Label.LABEL_OPTIONAL)); - add(ProtoUtil.createProtoField( - "field2_level2_message", - "some.type.name", - DescriptorProtos.FieldDescriptorProto.Type.TYPE_MESSAGE, - DescriptorProtos.FieldDescriptorProto.Label.LABEL_OPTIONAL, - nestedBQFields)); - add(ProtoUtil.createProtoField( - "field3_level2", - DescriptorProtos.FieldDescriptorProto.Type.TYPE_STRING, - DescriptorProtos.FieldDescriptorProto.Label.LABEL_OPTIONAL)); - add(ProtoUtil.createProtoField( - "field4_level2_message", - "some.type.name", - DescriptorProtos.FieldDescriptorProto.Type.TYPE_MESSAGE, - DescriptorProtos.FieldDescriptorProto.Label.LABEL_OPTIONAL, - nestedBQFields)); - }} - )); - }}); - - List fields = protoMapper.generateBigquerySchema(protoField); - - - assertEquals(protoField.getFields().size(), fields.size()); - assertEquals(4, fields.get(1).getSubFields().size()); - assertEquals(2, fields.get(1).getSubFields().get(1).getSubFields().size()); - assertEquals(2, fields.get(1).getSubFields().get(3).getSubFields().size()); - assertMultipleFields(nestedBQFields, fields.get(1).getSubFields().get(1).getSubFields()); - assertMultipleFields(nestedBQFields, fields.get(1).getSubFields().get(3).getSubFields()); - } - - @Test - public void shouldTestConvertToSchemaForTimestamp() { - ProtoField protoField = ProtoUtil.createProtoField(new ArrayList() {{ - add(ProtoUtil.createProtoField("field1_timestamp", - Constants.ProtobufTypeName.TIMESTAMP_PROTOBUF_TYPE_NAME, - DescriptorProtos.FieldDescriptorProto.Type.TYPE_MESSAGE, - DescriptorProtos.FieldDescriptorProto.Label.LABEL_OPTIONAL)); - }}); - - List fields = protoMapper.generateBigquerySchema(protoField); - - assertEquals(protoField.getFields().size(), fields.size()); - assertBqField(protoField.getFields().get(0).getName(), LegacySQLTypeName.TIMESTAMP, Field.Mode.NULLABLE, fields.get(0)); - } - - @Test - public void shouldTestConvertToSchemaForSpecialFields() { - ProtoField protoField = ProtoUtil.createProtoField(new ArrayList() {{ - add(ProtoUtil.createProtoField("field1_struct", - Constants.ProtobufTypeName.STRUCT_PROTOBUF_TYPE_NAME, - DescriptorProtos.FieldDescriptorProto.Type.TYPE_MESSAGE, - DescriptorProtos.FieldDescriptorProto.Label.LABEL_OPTIONAL)); - add(ProtoUtil.createProtoField("field2_bytes", - DescriptorProtos.FieldDescriptorProto.Type.TYPE_BYTES, - DescriptorProtos.FieldDescriptorProto.Label.LABEL_OPTIONAL)); - - add(ProtoUtil.createProtoField("field3_duration", - "." 
+ com.google.protobuf.Duration.getDescriptor().getFullName(), - DescriptorProtos.FieldDescriptorProto.Type.TYPE_MESSAGE, - DescriptorProtos.FieldDescriptorProto.Label.LABEL_OPTIONAL, - new ArrayList() { - { - add(ProtoUtil.createProtoField("duration_seconds", - DescriptorProtos.FieldDescriptorProto.Type.TYPE_INT64, - DescriptorProtos.FieldDescriptorProto.Label.LABEL_OPTIONAL)); - - add(ProtoUtil.createProtoField("duration_nanos", - DescriptorProtos.FieldDescriptorProto.Type.TYPE_INT32, - DescriptorProtos.FieldDescriptorProto.Label.LABEL_OPTIONAL)); - - } - })); - - add(ProtoUtil.createProtoField("field3_date", - "." + com.google.type.Date.getDescriptor().getFullName(), - DescriptorProtos.FieldDescriptorProto.Type.TYPE_MESSAGE, - DescriptorProtos.FieldDescriptorProto.Label.LABEL_OPTIONAL, - new ArrayList() { - { - add(ProtoUtil.createProtoField("year", - DescriptorProtos.FieldDescriptorProto.Type.TYPE_INT64, - DescriptorProtos.FieldDescriptorProto.Label.LABEL_OPTIONAL)); - - add(ProtoUtil.createProtoField("month", - DescriptorProtos.FieldDescriptorProto.Type.TYPE_INT32, - DescriptorProtos.FieldDescriptorProto.Label.LABEL_OPTIONAL)); - - add(ProtoUtil.createProtoField("day", - DescriptorProtos.FieldDescriptorProto.Type.TYPE_INT32, - DescriptorProtos.FieldDescriptorProto.Label.LABEL_OPTIONAL)); - - } - })); - - }}); - - List fields = protoMapper.generateBigquerySchema(protoField); - - assertEquals(protoField.getFields().size(), fields.size()); - assertBqField(protoField.getFields().get(0).getName(), LegacySQLTypeName.STRING, Field.Mode.NULLABLE, fields.get(0)); - assertBqField(protoField.getFields().get(1).getName(), LegacySQLTypeName.BYTES, Field.Mode.NULLABLE, fields.get(1)); - assertBqField(protoField.getFields().get(2).getName(), LegacySQLTypeName.RECORD, Field.Mode.NULLABLE, fields.get(2)); - assertBqField(protoField.getFields().get(3).getName(), LegacySQLTypeName.RECORD, Field.Mode.NULLABLE, fields.get(3)); - assertEquals(2, fields.get(2).getSubFields().size()); - assertBqField("duration_seconds", LegacySQLTypeName.INTEGER, Field.Mode.NULLABLE, fields.get(2).getSubFields().get(0)); - assertBqField("duration_nanos", LegacySQLTypeName.INTEGER, Field.Mode.NULLABLE, fields.get(2).getSubFields().get(1)); - - assertEquals(3, fields.get(3).getSubFields().size()); - assertBqField("year", LegacySQLTypeName.INTEGER, Field.Mode.NULLABLE, fields.get(3).getSubFields().get(0)); - assertBqField("month", LegacySQLTypeName.INTEGER, Field.Mode.NULLABLE, fields.get(3).getSubFields().get(1)); - assertBqField("day", LegacySQLTypeName.INTEGER, Field.Mode.NULLABLE, fields.get(3).getSubFields().get(2)); - } - - @Test - public void shouldTestConvertToSchemaForRepeatedFields() { - ProtoField protoField = ProtoUtil.createProtoField(new ArrayList() {{ - add(ProtoUtil.createProtoField("field1_map", - DescriptorProtos.FieldDescriptorProto.Type.TYPE_INT32, - DescriptorProtos.FieldDescriptorProto.Label.LABEL_REPEATED)); - add(ProtoUtil.createProtoField("field2_repeated", - DescriptorProtos.FieldDescriptorProto.Type.TYPE_STRING, - DescriptorProtos.FieldDescriptorProto.Label.LABEL_REPEATED)); - - }}); - - List fields = protoMapper.generateBigquerySchema(protoField); - - assertEquals(protoField.getFields().size(), fields.size()); - assertBqField(protoField.getFields().get(0).getName(), LegacySQLTypeName.INTEGER, Field.Mode.REPEATED, fields.get(0)); - assertBqField(protoField.getFields().get(1).getName(), LegacySQLTypeName.STRING, Field.Mode.REPEATED, fields.get(1)); - } - - public void assertMultipleFields(List pfields, 
List bqFields) { - IntStream.range(0, bqFields.size()) - .forEach(index -> { - assertBqField(pfields.get(index).getName(), expectedType.get(pfields.get(index).getType()), Field.Mode.NULLABLE, bqFields.get(index)); - }); - } - - public void assertBqField(String name, LegacySQLTypeName ftype, Field.Mode mode, Field bqf) { - assertEquals(mode, bqf.getMode()); - assertEquals(name, bqf.getName()); - assertEquals(ftype, bqf.getType()); - } - - -} diff --git a/src/test/java/io/odpf/firehose/sink/bigquery/proto/ProtoUpdateListenerTest.java b/src/test/java/io/odpf/firehose/sink/bigquery/proto/ProtoUpdateListenerTest.java deleted file mode 100644 index 705e856ca..000000000 --- a/src/test/java/io/odpf/firehose/sink/bigquery/proto/ProtoUpdateListenerTest.java +++ /dev/null @@ -1,269 +0,0 @@ -package io.odpf.firehose.sink.bigquery.proto; - -import com.fasterxml.jackson.databind.node.JsonNodeFactory; -import com.fasterxml.jackson.databind.node.ObjectNode; -import com.google.cloud.bigquery.BigQueryException; -import com.google.cloud.bigquery.Field; -import com.google.cloud.bigquery.LegacySQLTypeName; -import com.google.protobuf.InvalidProtocolBufferException; -import com.google.protobuf.Descriptors.Descriptor; - -import io.odpf.firehose.TestKeyBQ; -import io.odpf.firehose.config.BigQuerySinkConfig; -import io.odpf.firehose.message.Message; -import io.odpf.firehose.sink.bigquery.converter.MessageRecordConverterCache; -import io.odpf.firehose.sink.bigquery.handler.BigQueryClient; -import io.odpf.firehose.sink.bigquery.models.MetadataUtil; -import io.odpf.firehose.sink.bigquery.models.ProtoField; -import io.odpf.firehose.sink.bigquery.models.Records; -import io.odpf.stencil.client.StencilClient; -import io.odpf.stencil.Parser; -import org.aeonbits.owner.ConfigFactory; -import org.junit.Assert; -import org.junit.Before; -import org.junit.Test; -import org.junit.jupiter.api.Assertions; -import org.junit.runner.RunWith; -import org.mockito.Mock; -import org.mockito.Mockito; -import org.mockito.runners.MockitoJUnitRunner; - -import java.io.IOException; -import java.time.Instant; -import java.util.ArrayList; -import java.util.Collections; -import java.util.HashMap; - -import static org.mockito.Mockito.doNothing; -import static org.mockito.Mockito.doThrow; -import static org.mockito.Mockito.times; -import static org.mockito.Mockito.verify; -import static org.mockito.Mockito.when; - -@RunWith(MockitoJUnitRunner.class) -public class ProtoUpdateListenerTest { - @Mock - private BigQueryClient bigQueryClient; - @Mock - private StencilClient stencilClient; - - private BigQuerySinkConfig config; - - private MessageRecordConverterCache converterWrapper; - - @Before - public void setUp() throws InvalidProtocolBufferException { - System.setProperty("INPUT_SCHEMA_PROTO_CLASS", "io.odpf.firehose.TestKeyBQ"); - System.setProperty("SINK_BIGQUERY_ENABLE_AUTO_SCHEMA_UPDATE", "false"); - config = ConfigFactory.create(BigQuerySinkConfig.class, System.getProperties()); - converterWrapper = new MessageRecordConverterCache(); - when(stencilClient.parse(Mockito.anyString(), Mockito.any())).thenCallRealMethod(); - when(stencilClient.getParser(Mockito.anyString())).thenCallRealMethod(); - } - - @Test - public void shouldUseNewSchemaIfProtoChanges() throws IOException { - ProtoUpdateListener protoUpdateListener = new ProtoUpdateListener(config, bigQueryClient, converterWrapper); - - ProtoField returnedProtoField = new ProtoField(); - returnedProtoField.addField(ProtoUtil.createProtoField("order_number", 1)); - 
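
Editorial note: the ProtoMapperTest removed just above pinned down how protobuf field types map onto BigQuery legacy SQL types. A compact sketch of that mapping, reconstructed only from the expectations in those tests; the method name and the default branch are illustrative and do not reproduce the removed ProtoMapper API:

import com.google.cloud.bigquery.LegacySQLTypeName;
import com.google.protobuf.DescriptorProtos.FieldDescriptorProto.Type;

class ProtoToBigQueryTypeSketch {
    // Mapping asserted by the deleted tests: bytes -> BYTES, string/enum -> STRING,
    // bool -> BOOLEAN, double/float -> FLOAT, every integer variant -> INTEGER,
    // message -> RECORD. Timestamp -> TIMESTAMP and Struct -> STRING were
    // special-cased by type name in the "special fields" test, not by this switch.
    static LegacySQLTypeName toLegacyType(Type type) {
        switch (type) {
            case TYPE_BYTES:
                return LegacySQLTypeName.BYTES;
            case TYPE_STRING:
            case TYPE_ENUM:
                return LegacySQLTypeName.STRING;
            case TYPE_BOOL:
                return LegacySQLTypeName.BOOLEAN;
            case TYPE_DOUBLE:
            case TYPE_FLOAT:
                return LegacySQLTypeName.FLOAT;
            case TYPE_INT64:
            case TYPE_UINT64:
            case TYPE_INT32:
            case TYPE_UINT32:
            case TYPE_FIXED64:
            case TYPE_FIXED32:
            case TYPE_SFIXED32:
            case TYPE_SFIXED64:
            case TYPE_SINT32:
            case TYPE_SINT64:
                return LegacySQLTypeName.INTEGER;
            case TYPE_MESSAGE:
                return LegacySQLTypeName.RECORD;
            default:
                // Assumption: types not covered by the deleted tests are rejected here.
                throw new IllegalArgumentException("unmapped proto type: " + type);
        }
    }
}

Per the deleted shouldTestConvertToSchemaForRepeatedFields, repeated proto fields kept the same column type but were emitted with Field.Mode.REPEATED instead of NULLABLE.
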
returnedProtoField.addField(ProtoUtil.createProtoField("order_url", 2)); - - HashMap descriptorsMap = new HashMap() {{ - put(String.format("%s", TestKeyBQ.class.getName()), TestKeyBQ.getDescriptor()); - }}; - when(stencilClient.get(TestKeyBQ.class.getName())).thenReturn(descriptorsMap.get(TestKeyBQ.class.getName())); - ObjectNode objNode = JsonNodeFactory.instance.objectNode(); - objNode.put("1", "order_number"); - objNode.put("2", "order_url"); - - ArrayList bqSchemaFields = new ArrayList() {{ - add(Field.newBuilder("order_number", LegacySQLTypeName.STRING).setMode(Field.Mode.NULLABLE).build()); - add(Field.newBuilder("order_url", LegacySQLTypeName.STRING).setMode(Field.Mode.NULLABLE).build()); - addAll(MetadataUtil.getMetadataFields()); - }}; - doNothing().when(bigQueryClient).upsertTable(bqSchemaFields); - - Parser protoParser = stencilClient.getParser(config.getInputSchemaProtoClass()); - protoUpdateListener.setStencilParser(protoParser); - protoUpdateListener.onSchemaUpdate(descriptorsMap); - TestKeyBQ testKeyBQ = TestKeyBQ.newBuilder().setOrderNumber("order").setOrderUrl("test").build(); - Instant now = Instant.now(); - Message testMessage = new Message("".getBytes(), testKeyBQ.toByteArray(), "topic", 1, 1); - Records convert = protoUpdateListener.getMessageRecordConverterCache().getMessageRecordConverter().convert(Collections.singletonList(testMessage), now); - Assert.assertEquals(1, convert.getValidRecords().size()); - Assert.assertEquals("order", convert.getValidRecords().get(0).getColumns().get("order_number")); - Assert.assertEquals("test", convert.getValidRecords().get(0).getColumns().get("order_url")); - } - - - @Test(expected = RuntimeException.class) - public void shouldThrowExceptionIfParserFails() { - ProtoUpdateListener protoUpdateListener = new ProtoUpdateListener(config, bigQueryClient, converterWrapper); - - HashMap descriptorsMap = new HashMap() {{ - put(String.format("%s.%s", TestKeyBQ.class.getPackage(), TestKeyBQ.class.getName()), TestKeyBQ.getDescriptor()); - }}; - ObjectNode objNode = JsonNodeFactory.instance.objectNode(); - objNode.put("1", "order_number"); - objNode.put("2", "order_url"); - - protoUpdateListener.onSchemaUpdate(descriptorsMap); - } - - @Test(expected = RuntimeException.class) - public void shouldThrowExceptionIfConverterFails() throws IOException { - ProtoUpdateListener protoUpdateListener = new ProtoUpdateListener(config, bigQueryClient, converterWrapper); - ProtoField returnedProtoField = new ProtoField(); - returnedProtoField.addField(ProtoUtil.createProtoField("order_number", 1)); - returnedProtoField.addField(ProtoUtil.createProtoField("order_url", 2)); - - HashMap descriptorsMap = new HashMap() {{ - put(String.format("%s.%s", TestKeyBQ.class.getPackage(), TestKeyBQ.class.getName()), TestKeyBQ.getDescriptor()); - }}; - ObjectNode objNode = JsonNodeFactory.instance.objectNode(); - objNode.put("1", "order_number"); - objNode.put("2", "order_url"); - - ArrayList bqSchemaFields = new ArrayList() {{ - add(Field.newBuilder("order_number", LegacySQLTypeName.INTEGER).setMode(Field.Mode.NULLABLE).build()); - add(Field.newBuilder("order_url", LegacySQLTypeName.STRING).setMode(Field.Mode.NULLABLE).build()); - addAll(MetadataUtil.getMetadataFields()); - }}; - doThrow(new BigQueryException(10, "bigquery mapping has failed")).when(bigQueryClient).upsertTable(bqSchemaFields); - - protoUpdateListener.onSchemaUpdate(descriptorsMap); - } - - @Test(expected = RuntimeException.class) - public void shouldThrowExceptionIfDatasetLocationIsChanged() throws 
IOException { - ProtoUpdateListener protoUpdateListener = new ProtoUpdateListener(config, bigQueryClient, converterWrapper); - - ProtoField returnedProtoField = new ProtoField(); - returnedProtoField.addField(ProtoUtil.createProtoField("order_number", 1)); - returnedProtoField.addField(ProtoUtil.createProtoField("order_url", 2)); - - HashMap descriptorsMap = new HashMap() {{ - put(String.format("%s.%s", TestKeyBQ.class.getPackage(), TestKeyBQ.class.getName()), TestKeyBQ.getDescriptor()); - }}; - ObjectNode objNode = JsonNodeFactory.instance.objectNode(); - objNode.put("1", "order_number"); - objNode.put("2", "order_url"); - - ArrayList bqSchemaFields = new ArrayList() {{ - add(Field.newBuilder("order_number", LegacySQLTypeName.INTEGER).setMode(Field.Mode.NULLABLE).build()); - add(Field.newBuilder("order_url", LegacySQLTypeName.STRING).setMode(Field.Mode.NULLABLE).build()); - addAll(MetadataUtil.getMetadataFields()); - }}; - doThrow(new RuntimeException("cannot change dataset location")).when(bigQueryClient).upsertTable(bqSchemaFields); - - protoUpdateListener.onSchemaUpdate(descriptorsMap); - } - - @Test - public void shouldNotNamespaceMetadataFieldsWhenNamespaceIsNotProvided() throws IOException { - ProtoUpdateListener protoUpdateListener = new ProtoUpdateListener(config, bigQueryClient, converterWrapper); - - ProtoField returnedProtoField = new ProtoField(); - returnedProtoField.addField(ProtoUtil.createProtoField("order_number", 1)); - returnedProtoField.addField(ProtoUtil.createProtoField("order_url", 2)); - - HashMap descriptorsMap = new HashMap() {{ - put(String.format("%s", TestKeyBQ.class.getName()), TestKeyBQ.getDescriptor()); - }}; - when(stencilClient.get(TestKeyBQ.class.getName())).thenReturn(descriptorsMap.get(TestKeyBQ.class.getName())); - ObjectNode objNode = JsonNodeFactory.instance.objectNode(); - objNode.put("1", "order_number"); - objNode.put("2", "order_url"); - - ArrayList bqSchemaFields = new ArrayList() {{ - add(Field.newBuilder("order_number", LegacySQLTypeName.STRING).setMode(Field.Mode.NULLABLE).build()); - add(Field.newBuilder("order_url", LegacySQLTypeName.STRING).setMode(Field.Mode.NULLABLE).build()); - addAll(MetadataUtil.getMetadataFields()); // metadata fields are not namespaced - }}; - doNothing().when(bigQueryClient).upsertTable(bqSchemaFields); - Parser protoParser = stencilClient.getParser(config.getInputSchemaProtoClass()); - protoUpdateListener.setStencilParser(protoParser); - protoUpdateListener.onSchemaUpdate(descriptorsMap); - TestKeyBQ testKeyBQ = TestKeyBQ.newBuilder().setOrderNumber("order").setOrderUrl("test").build(); - Instant now = Instant.now(); - Message testMessage = new Message("".getBytes(), testKeyBQ.toByteArray(), "topic", 1, 1); - Records convert = protoUpdateListener.getMessageRecordConverterCache().getMessageRecordConverter().convert(Collections.singletonList(testMessage), now); - Assert.assertEquals(1, convert.getValidRecords().size()); - Assert.assertEquals("order", convert.getValidRecords().get(0).getColumns().get("order_number")); - Assert.assertEquals("test", convert.getValidRecords().get(0).getColumns().get("order_url")); - verify(bigQueryClient, times(1)).upsertTable(bqSchemaFields); // assert that metadata fields were not namespaced - } - - @Test - public void shouldNamespaceMetadataFieldsWhenNamespaceIsProvided() throws IOException { - System.setProperty("SINK_BIGQUERY_METADATA_NAMESPACE", "metadata_ns"); - config = ConfigFactory.create(BigQuerySinkConfig.class, System.getProperties()); - ProtoUpdateListener 
protoUpdateListener = new ProtoUpdateListener(config, bigQueryClient, converterWrapper); - - ProtoField returnedProtoField = new ProtoField(); - returnedProtoField.addField(ProtoUtil.createProtoField("order_number", 1)); - returnedProtoField.addField(ProtoUtil.createProtoField("order_url", 2)); - - HashMap descriptorsMap = new HashMap() {{ - put(String.format("%s", TestKeyBQ.class.getName()), TestKeyBQ.getDescriptor()); - }}; - when(stencilClient.get(TestKeyBQ.class.getName())).thenReturn(descriptorsMap.get(TestKeyBQ.class.getName())); - ObjectNode objNode = JsonNodeFactory.instance.objectNode(); - objNode.put("1", "order_number"); - objNode.put("2", "order_url"); - - ArrayList bqSchemaFields = new ArrayList() {{ - add(Field.newBuilder("order_number", LegacySQLTypeName.STRING).setMode(Field.Mode.NULLABLE).build()); - add(Field.newBuilder("order_url", LegacySQLTypeName.STRING).setMode(Field.Mode.NULLABLE).build()); - add(MetadataUtil.getNamespacedMetadataField(config.getBqMetadataNamespace())); // metadata fields are namespaced - }}; - doNothing().when(bigQueryClient).upsertTable(bqSchemaFields); - - Parser protoParser = stencilClient.getParser(config.getInputSchemaProtoClass()); - protoUpdateListener.setStencilParser(protoParser); - protoUpdateListener.onSchemaUpdate(descriptorsMap); - TestKeyBQ testKeyBQ = TestKeyBQ.newBuilder().setOrderNumber("order").setOrderUrl("test").build(); - Instant now = Instant.now(); - Message testMessage = new Message("".getBytes(), testKeyBQ.toByteArray(), "topic", 1, 1); - Records convert = protoUpdateListener.getMessageRecordConverterCache().getMessageRecordConverter().convert(Collections.singletonList(testMessage), now); - Assert.assertEquals(1, convert.getValidRecords().size()); - Assert.assertEquals("order", convert.getValidRecords().get(0).getColumns().get("order_number")); - Assert.assertEquals("test", convert.getValidRecords().get(0).getColumns().get("order_url")); - - verify(bigQueryClient, times(1)).upsertTable(bqSchemaFields); - System.setProperty("SINK_BIGQUERY_METADATA_NAMESPACE", ""); - } - - @Test - public void shouldThrowExceptionWhenMetadataNamespaceNameCollidesWithAnyFieldName() throws IOException { - System.setProperty("SINK_BIGQUERY_METADATA_NAMESPACE", "order_number"); // set field name to an existing column name - config = ConfigFactory.create(BigQuerySinkConfig.class, System.getProperties()); - ProtoUpdateListener protoUpdateListener = new ProtoUpdateListener(config, bigQueryClient, converterWrapper); - - ProtoField returnedProtoField = new ProtoField(); - returnedProtoField.addField(ProtoUtil.createProtoField("order_number", 1)); - returnedProtoField.addField(ProtoUtil.createProtoField("order_url", 2)); - - HashMap descriptorsMap = new HashMap() {{ - put(String.format("%s", TestKeyBQ.class.getName()), TestKeyBQ.getDescriptor()); - }}; - ObjectNode objNode = JsonNodeFactory.instance.objectNode(); - objNode.put("1", "order_number"); - objNode.put("2", "order_url"); - - ArrayList bqSchemaFields = new ArrayList() {{ - add(Field.newBuilder("order_number", LegacySQLTypeName.STRING).setMode(Field.Mode.NULLABLE).build()); - add(Field.newBuilder("order_url", LegacySQLTypeName.STRING).setMode(Field.Mode.NULLABLE).build()); - add(MetadataUtil.getNamespacedMetadataField(config.getBqMetadataNamespace())); - }}; - - Exception exception = Assertions.assertThrows(RuntimeException.class, () -> { - protoUpdateListener.onSchemaUpdate(descriptorsMap); - }); - Assert.assertEquals("Metadata field(s) is already present in the schema. 
fields: [order_number]", exception.getMessage()); - verify(bigQueryClient, times(0)).upsertTable(bqSchemaFields); - System.setProperty("SINK_BIGQUERY_METADATA_NAMESPACE", ""); - } - -} diff --git a/src/test/java/io/odpf/firehose/sink/bigquery/proto/ProtoUtil.java b/src/test/java/io/odpf/firehose/sink/bigquery/proto/ProtoUtil.java deleted file mode 100644 index e253e7053..000000000 --- a/src/test/java/io/odpf/firehose/sink/bigquery/proto/ProtoUtil.java +++ /dev/null @@ -1,65 +0,0 @@ -package io.odpf.firehose.sink.bigquery.proto; - -import com.google.protobuf.DescriptorProtos; -import com.google.protobuf.Duration; -import com.google.protobuf.Timestamp; -import io.odpf.firehose.StatusBQ; -import io.odpf.firehose.TestMessageBQ; -import io.odpf.firehose.TestNestedMessageBQ; -import io.odpf.firehose.sink.bigquery.models.ProtoField; - -import java.time.Instant; -import java.util.ArrayList; -import java.util.List; - -public class ProtoUtil { - private static final int TRIP_DURATION_NANOS = 1000000000; - private static int call = 0; - - public static TestMessageBQ generateTestMessage(Instant now) { - call++; - Timestamp createdAt = Timestamp.newBuilder().setSeconds(now.getEpochSecond()).setNanos(now.getNano()).build(); - return TestMessageBQ.newBuilder() - .setOrderNumber("order-" + call) - .setOrderUrl("order-url-" + call) - .setOrderDetails("order-details-" + call) - .setCreatedAt(createdAt) - .setStatus(StatusBQ.COMPLETED) - .setTripDuration(Duration.newBuilder().setSeconds(1).setNanos(TRIP_DURATION_NANOS).build()) - .addUpdatedAt(createdAt) - .addUpdatedAt(createdAt) - .build(); - - } - - public static TestNestedMessageBQ generateTestNestedMessage(String nestedId, TestMessageBQ message) { - return TestNestedMessageBQ.newBuilder() - .setSingleMessage(message) - .setNestedId(nestedId) - .build(); - } - - public static ProtoField createProtoField(String name, DescriptorProtos.FieldDescriptorProto.Type type, DescriptorProtos.FieldDescriptorProto.Label label) { - return new ProtoField(name, "", type, label, new ArrayList<>(), 0); - } - - public static ProtoField createProtoField(List subFields) { - return new ProtoField("", "", null, null, subFields, 0); - } - - public static ProtoField createProtoField(String name, String typeName, DescriptorProtos.FieldDescriptorProto.Type type, DescriptorProtos.FieldDescriptorProto.Label label) { - return new ProtoField(name, typeName, type, label, new ArrayList<>(), 0); - } - - public static ProtoField createProtoField(String name, String typeName, DescriptorProtos.FieldDescriptorProto.Type type, DescriptorProtos.FieldDescriptorProto.Label label, List fields) { - return new ProtoField(name, typeName, type, label, fields, 0); - } - - public static ProtoField createProtoField(String name, int index) { - return new ProtoField(name, "", null, null, new ArrayList<>(), index); - } - - public static ProtoField createProtoField(String name, String typeName, DescriptorProtos.FieldDescriptorProto.Type type, int index, List fields) { - return new ProtoField(name, typeName, type, null, fields, index); - } -} diff --git a/src/test/java/io/odpf/firehose/sink/blob/BlobSinkTest.java b/src/test/java/io/odpf/firehose/sink/blob/BlobSinkTest.java index 597066d7d..a72f76f47 100644 --- a/src/test/java/io/odpf/firehose/sink/blob/BlobSinkTest.java +++ b/src/test/java/io/odpf/firehose/sink/blob/BlobSinkTest.java @@ -1,15 +1,15 @@ package io.odpf.firehose.sink.blob; import com.google.protobuf.DynamicMessage; +import io.odpf.depot.error.ErrorType; import io.odpf.firehose.TestMessageBQ; 
import io.odpf.firehose.consumer.kafka.OffsetManager; import io.odpf.firehose.message.Message; -import io.odpf.firehose.error.ErrorType; import io.odpf.firehose.exception.DeserializerException; import io.odpf.firehose.exception.EmptyMessageException; import io.odpf.firehose.exception.UnknownFieldsException; import io.odpf.firehose.exception.SinkException; -import io.odpf.firehose.metrics.Instrumentation; +import io.odpf.firehose.metrics.FirehoseInstrumentation; import io.odpf.firehose.sink.blob.message.MessageDeSerializer; import io.odpf.firehose.sink.blob.message.Record; import io.odpf.firehose.sink.blob.writer.WriterOrchestrator; @@ -19,7 +19,7 @@ import org.junit.Test; import org.junit.runner.RunWith; import org.mockito.Mock; -import org.mockito.runners.MockitoJUnitRunner; +import org.mockito.junit.MockitoJUnitRunner; import java.io.IOException; import java.util.Arrays; @@ -30,7 +30,6 @@ import java.util.Map; import static org.junit.Assert.*; -import static org.mockito.ArgumentMatchers.any; import static org.mockito.Mockito.*; @RunWith(MockitoJUnitRunner.class) @@ -40,7 +39,7 @@ public class BlobSinkTest { private WriterOrchestrator writerOrchestrator; @Mock - private Instrumentation instrumentation; + private FirehoseInstrumentation firehoseInstrumentation; @Mock private MessageDeSerializer messageDeSerializer; @@ -52,7 +51,7 @@ public class BlobSinkTest { @Before public void setUp() throws Exception { offsetManager = new OffsetManager(); - blobSink = new BlobSink(instrumentation, "objectstorage", offsetManager, writerOrchestrator, messageDeSerializer); + blobSink = new BlobSink(firehoseInstrumentation, "objectstorage", offsetManager, writerOrchestrator, messageDeSerializer); } @Test @@ -113,7 +112,7 @@ public void shouldReturnCommittableOffsets() throws Exception { @Test public void shouldReturnMessageThatCausedDeserializerException() throws Exception { - blobSink = new BlobSink(instrumentation, "objectstorage", new OffsetManager(), writerOrchestrator, messageDeSerializer); + blobSink = new BlobSink(firehoseInstrumentation, "objectstorage", new OffsetManager(), writerOrchestrator, messageDeSerializer); Message message1 = new Message("".getBytes(), "".getBytes(), "booking", 1, 1); Message message2 = new Message("".getBytes(), "".getBytes(), "booking", 1, 2); @@ -170,7 +169,7 @@ public void shouldManageOffset() { @Test public void shouldReturnMessagesWhenMessagesHasErrorCausedByEmptyMessageException() { - blobSink = new BlobSink(instrumentation, "objectstorage", new OffsetManager(), writerOrchestrator, messageDeSerializer); + blobSink = new BlobSink(firehoseInstrumentation, "objectstorage", new OffsetManager(), writerOrchestrator, messageDeSerializer); Message message1 = new Message("".getBytes(), "".getBytes(), "booking", 2, 1); Message message2 = new Message("".getBytes(), "".getBytes(), "booking", 2, 2); @@ -186,7 +185,7 @@ public void shouldReturnMessagesWhenMessagesHasErrorCausedByEmptyMessageExceptio @Test public void shouldReturnMessagesWhenMessagesHasErrorCausedByUnknownFields() { - blobSink = new BlobSink(instrumentation, "objectstorage", new OffsetManager(), writerOrchestrator, messageDeSerializer); + blobSink = new BlobSink(firehoseInstrumentation, "objectstorage", new OffsetManager(), writerOrchestrator, messageDeSerializer); Message message1 = new Message("".getBytes(), "".getBytes(), "booking", 2, 1); Message message2 = new Message("".getBytes(), "".getBytes(), "booking", 2, 2); diff --git a/src/test/java/io/odpf/firehose/sink/blob/message/MessageDeSerializerTest.java 
b/src/test/java/io/odpf/firehose/sink/blob/message/MessageDeSerializerTest.java index 302ceb0e0..b42d99b81 100644 --- a/src/test/java/io/odpf/firehose/sink/blob/message/MessageDeSerializerTest.java +++ b/src/test/java/io/odpf/firehose/sink/blob/message/MessageDeSerializerTest.java @@ -18,7 +18,7 @@ import org.junit.Test; import org.junit.runner.RunWith; import org.mockito.Mock; -import org.mockito.runners.MockitoJUnitRunner; +import org.mockito.junit.MockitoJUnitRunner; import static org.mockito.Mockito.*; diff --git a/src/test/java/io/odpf/firehose/sink/blob/proto/KafkaMetadataProtoMessageUtilsTest.java b/src/test/java/io/odpf/firehose/sink/blob/proto/KafkaMetadataProtoFirehoseMessageUtilsTest.java similarity index 97% rename from src/test/java/io/odpf/firehose/sink/blob/proto/KafkaMetadataProtoMessageUtilsTest.java rename to src/test/java/io/odpf/firehose/sink/blob/proto/KafkaMetadataProtoFirehoseMessageUtilsTest.java index f92d672c6..249f7f22a 100644 --- a/src/test/java/io/odpf/firehose/sink/blob/proto/KafkaMetadataProtoMessageUtilsTest.java +++ b/src/test/java/io/odpf/firehose/sink/blob/proto/KafkaMetadataProtoFirehoseMessageUtilsTest.java @@ -7,7 +7,7 @@ import static org.junit.Assert.*; -public class KafkaMetadataProtoMessageUtilsTest { +public class KafkaMetadataProtoFirehoseMessageUtilsTest { @Test public void shouldCreateDescriptors() { diff --git a/src/test/java/io/odpf/firehose/sink/blob/writer/WriterOrchestratorTest.java b/src/test/java/io/odpf/firehose/sink/blob/writer/WriterOrchestratorTest.java index b1f12cca3..e8e8c9cb6 100644 --- a/src/test/java/io/odpf/firehose/sink/blob/writer/WriterOrchestratorTest.java +++ b/src/test/java/io/odpf/firehose/sink/blob/writer/WriterOrchestratorTest.java @@ -1,7 +1,7 @@ package io.odpf.firehose.sink.blob.writer; +import io.odpf.depot.metrics.StatsDReporter; import io.odpf.firehose.config.BlobSinkConfig; -import io.odpf.firehose.metrics.StatsDReporter; import io.odpf.firehose.sink.common.blobstorage.BlobStorage; import io.odpf.firehose.sink.blob.Constants; import io.odpf.firehose.sink.blob.TestProtoMessage; @@ -17,7 +17,7 @@ import org.mockito.Mock; import org.mockito.Mockito; import org.mockito.MockitoAnnotations; -import org.mockito.runners.MockitoJUnitRunner; +import org.mockito.junit.MockitoJUnitRunner; import java.io.IOException; import java.time.Instant; diff --git a/src/test/java/io/odpf/firehose/sink/blob/writer/local/LocalFileCheckerTest.java b/src/test/java/io/odpf/firehose/sink/blob/writer/local/LocalFileCheckerTest.java index 96a4abaef..f58a632a8 100644 --- a/src/test/java/io/odpf/firehose/sink/blob/writer/local/LocalFileCheckerTest.java +++ b/src/test/java/io/odpf/firehose/sink/blob/writer/local/LocalFileCheckerTest.java @@ -1,6 +1,6 @@ package io.odpf.firehose.sink.blob.writer.local; -import io.odpf.firehose.metrics.Instrumentation; +import io.odpf.firehose.metrics.FirehoseInstrumentation; import org.junit.Assert; import org.junit.Before; import org.junit.Rule; @@ -35,7 +35,7 @@ public class LocalFileCheckerTest { private LocalStorage localStorage; @Mock - private Instrumentation instrumentation; + private FirehoseInstrumentation firehoseInstrumentation; @Rule public ExpectedException expectedException = ExpectedException.none(); @@ -52,7 +52,7 @@ public void setup() throws IOException { initMocks(this); toBeFlushedToRemotePaths.clear(); writerMap.clear(); - worker = new LocalFileChecker(toBeFlushedToRemotePaths, writerMap, localStorage, instrumentation); + worker = new LocalFileChecker(toBeFlushedToRemotePaths, writerMap, 
localStorage, firehoseInstrumentation); } @Test @@ -147,7 +147,7 @@ public void shouldRecordMetricOfSuccessfullyClosedFiles() throws IOException { when(writer1.getMetadata()).thenReturn(new LocalFileMetadata("/tmp", "/tmp/a/random-file-name-1", 1L, recordCount, fileSize)); when(writer1.closeAndFetchMetaData()).thenReturn(new LocalFileMetadata("/tmp", "/tmp/a/random-file-name-1", 1L, recordCount, fileSize)); worker.run(); - verify(instrumentation).incrementCounter(LOCAL_FILE_CLOSE_TOTAL, SUCCESS_TAG); + verify(firehoseInstrumentation).incrementCounter(LOCAL_FILE_CLOSE_TOTAL, SUCCESS_TAG); } @Test @@ -159,7 +159,7 @@ public void shouldRecordMetricOfClosingTimeDuration() throws IOException { when(writer1.closeAndFetchMetaData()).thenReturn(new LocalFileMetadata("/tmp", "/tmp/a/random-file-name-1", 1L, recordCount, fileSize)); worker.run(); - verify(instrumentation, times(1)).captureDurationSince(eq(LOCAL_FILE_CLOSING_TIME_MILLISECONDS), any(Instant.class)); + verify(firehoseInstrumentation, times(1)).captureDurationSince(eq(LOCAL_FILE_CLOSING_TIME_MILLISECONDS), any(Instant.class)); } @Test @@ -171,7 +171,7 @@ public void shouldRecordMetricOfFileSizeInBytes() throws IOException { when(writer1.closeAndFetchMetaData()).thenReturn(new LocalFileMetadata("/tmp", "/tmp/a/random-file-name-1", 1L, recordCount, fileSize)); worker.run(); - verify(instrumentation, times(1)).captureCount(LOCAL_FILE_SIZE_BYTES, fileSize); + verify(firehoseInstrumentation, times(1)).captureCount(LOCAL_FILE_SIZE_BYTES, fileSize); } @Test @@ -187,7 +187,7 @@ public void shouldRecordMetricOfFailedClosedFiles() throws IOException { } catch (LocalFileWriterFailedException ignored) { } - verify(instrumentation, times(1)).incrementCounter(LOCAL_FILE_CLOSE_TOTAL, FAILURE_TAG); + verify(firehoseInstrumentation, times(1)).incrementCounter(LOCAL_FILE_CLOSE_TOTAL, FAILURE_TAG); } @Test @@ -211,7 +211,7 @@ public void shouldCaptureValueOfFileOpenCount() throws IOException { verify(writer2, times(1)).closeAndFetchMetaData(); Assert.assertEquals(2, toBeFlushedToRemotePaths.size()); Assert.assertEquals(0, writerMap.size()); - verify(instrumentation, times(3)).captureValue(LOCAL_FILE_OPEN_TOTAL, 2); - verify(instrumentation, times(3)).captureValue(LOCAL_FILE_OPEN_TOTAL, 0); + verify(firehoseInstrumentation, times(3)).captureValue(LOCAL_FILE_OPEN_TOTAL, 2); + verify(firehoseInstrumentation, times(3)).captureValue(LOCAL_FILE_OPEN_TOTAL, 0); } } diff --git a/src/test/java/io/odpf/firehose/sink/blob/writer/local/LocalStorageTest.java b/src/test/java/io/odpf/firehose/sink/blob/writer/local/LocalStorageTest.java index 0ba2b3cd6..3e4b06ff9 100644 --- a/src/test/java/io/odpf/firehose/sink/blob/writer/local/LocalStorageTest.java +++ b/src/test/java/io/odpf/firehose/sink/blob/writer/local/LocalStorageTest.java @@ -3,7 +3,7 @@ import com.google.protobuf.Descriptors; import io.odpf.firehose.config.BlobSinkConfig; -import io.odpf.firehose.metrics.Instrumentation; +import io.odpf.firehose.metrics.FirehoseInstrumentation; import io.odpf.firehose.sink.blob.Constants; import io.odpf.firehose.sink.blob.writer.local.policy.WriterPolicy; import org.junit.Test; @@ -20,14 +20,14 @@ public void shouldDeleteFiles() throws Exception { BlobSinkConfig sinkConfig = Mockito.mock(BlobSinkConfig.class); List metadataFieldDescriptor = new ArrayList<>(); List policies = new ArrayList<>(); - Instrumentation instrumentation = Mockito.mock(Instrumentation.class); - LocalStorage storage = new LocalStorage(sinkConfig, null, metadataFieldDescriptor, policies, instrumentation); 
+ FirehoseInstrumentation firehoseInstrumentation = Mockito.mock(FirehoseInstrumentation.class); + LocalStorage storage = new LocalStorage(sinkConfig, null, metadataFieldDescriptor, policies, firehoseInstrumentation); LocalStorage spy = Mockito.spy(storage); Mockito.doNothing().when(spy).deleteLocalFile(Paths.get("/tmp/a"), Paths.get("/tmp/.a.crc")); Mockito.when(sinkConfig.getLocalFileWriterType()).thenReturn(Constants.WriterType.PARQUET); spy.deleteLocalFile("/tmp/a"); Mockito.verify(spy, Mockito.times(1)).deleteLocalFile(Paths.get("/tmp/a"), Paths.get("/tmp/.a.crc")); - Mockito.verify(instrumentation, Mockito.times(1)).logInfo("Deleting Local File {}", Paths.get("/tmp/a")); - Mockito.verify(instrumentation, Mockito.times(1)).logInfo("Deleting Local File {}", Paths.get("/tmp/.a.crc")); + Mockito.verify(firehoseInstrumentation, Mockito.times(1)).logInfo("Deleting Local File {}", Paths.get("/tmp/a")); + Mockito.verify(firehoseInstrumentation, Mockito.times(1)).logInfo("Deleting Local File {}", Paths.get("/tmp/.a.crc")); } } diff --git a/src/test/java/io/odpf/firehose/sink/blob/writer/local/TimePartitionedPathUtilsTest.java b/src/test/java/io/odpf/firehose/sink/blob/writer/local/TimePartitionedPathUtilsTest.java index dbd33c436..8d6d3fa86 100644 --- a/src/test/java/io/odpf/firehose/sink/blob/writer/local/TimePartitionedPathUtilsTest.java +++ b/src/test/java/io/odpf/firehose/sink/blob/writer/local/TimePartitionedPathUtilsTest.java @@ -10,7 +10,7 @@ import org.junit.Test; import org.junit.runner.RunWith; import org.mockito.Mockito; -import org.mockito.runners.MockitoJUnitRunner; +import org.mockito.junit.MockitoJUnitRunner; import java.nio.file.Path; import java.nio.file.Paths; diff --git a/src/test/java/io/odpf/firehose/sink/blob/writer/local/policy/SizeBasedRotatingPolicyTest.java b/src/test/java/io/odpf/firehose/sink/blob/writer/local/policy/SizeBasedRotatingPolicyTest.java index c14d68b3f..f5011a916 100644 --- a/src/test/java/io/odpf/firehose/sink/blob/writer/local/policy/SizeBasedRotatingPolicyTest.java +++ b/src/test/java/io/odpf/firehose/sink/blob/writer/local/policy/SizeBasedRotatingPolicyTest.java @@ -6,7 +6,7 @@ import org.junit.Test; import org.junit.rules.ExpectedException; import org.junit.runner.RunWith; -import org.mockito.runners.MockitoJUnitRunner; +import org.mockito.junit.MockitoJUnitRunner; @RunWith(MockitoJUnitRunner.class) public class SizeBasedRotatingPolicyTest { diff --git a/src/test/java/io/odpf/firehose/sink/blob/writer/local/policy/TimeBasedRotatingPolicyTest.java b/src/test/java/io/odpf/firehose/sink/blob/writer/local/policy/TimeBasedRotatingPolicyTest.java index dbac6e5be..b6c0133d4 100644 --- a/src/test/java/io/odpf/firehose/sink/blob/writer/local/policy/TimeBasedRotatingPolicyTest.java +++ b/src/test/java/io/odpf/firehose/sink/blob/writer/local/policy/TimeBasedRotatingPolicyTest.java @@ -6,7 +6,7 @@ import org.junit.Test; import org.junit.rules.ExpectedException; import org.junit.runner.RunWith; -import org.mockito.runners.MockitoJUnitRunner; +import org.mockito.junit.MockitoJUnitRunner; @RunWith(MockitoJUnitRunner.class) public class TimeBasedRotatingPolicyTest { diff --git a/src/test/java/io/odpf/firehose/sink/blob/writer/remote/BlobStorageCheckerTest.java b/src/test/java/io/odpf/firehose/sink/blob/writer/remote/BlobStorageCheckerTest.java index 85f857e33..71a204fe4 100644 --- a/src/test/java/io/odpf/firehose/sink/blob/writer/remote/BlobStorageCheckerTest.java +++ b/src/test/java/io/odpf/firehose/sink/blob/writer/remote/BlobStorageCheckerTest.java @@ -1,7 
+1,7 @@ package io.odpf.firehose.sink.blob.writer.remote; import io.odpf.firehose.config.BlobSinkConfig; -import io.odpf.firehose.metrics.Instrumentation; +import io.odpf.firehose.metrics.FirehoseInstrumentation; import io.odpf.firehose.sink.common.blobstorage.BlobStorage; import io.odpf.firehose.sink.blob.Constants; import io.odpf.firehose.sink.blob.writer.local.LocalFileMetadata; @@ -13,7 +13,7 @@ import org.junit.runner.RunWith; import org.mockito.Mock; import org.mockito.Mockito; -import org.mockito.runners.MockitoJUnitRunner; +import org.mockito.junit.MockitoJUnitRunner; import java.io.IOException; import java.util.ArrayList; @@ -24,7 +24,6 @@ import static io.odpf.firehose.metrics.Metrics.*; import static io.odpf.firehose.metrics.BlobStorageMetrics.*; import static org.junit.Assert.assertEquals; -import static org.mockito.ArgumentMatchers.any; import static org.mockito.Mockito.*; @RunWith(MockitoJUnitRunner.class) @@ -43,7 +42,7 @@ public class BlobStorageCheckerTest { @Mock private BlobSinkConfig sinkConfig = Mockito.mock(BlobSinkConfig.class); @Mock - private Instrumentation instrumentation; + private FirehoseInstrumentation firehoseInstrumentation; private LocalFileMetadata localFileMetadata; private String objectName; @@ -59,7 +58,7 @@ public void setup() { remoteUploadFutures, remoteUploadScheduler, blobStorage, - instrumentation); + firehoseInstrumentation); toBeFlushedToRemotePaths.clear(); flushedToRemotePaths.clear(); remoteUploadFutures.clear(); @@ -114,7 +113,7 @@ public void shouldRecordMetricOfFileUploadedCount() throws ExecutionException, I when(remoteUploadScheduler.submit(any(Callable.class))).thenReturn(f); worker.run(); - verify(instrumentation, times(1)).incrementCounter(FILE_UPLOAD_TOTAL, SUCCESS_TAG); + verify(firehoseInstrumentation, times(1)).incrementCounter(FILE_UPLOAD_TOTAL, SUCCESS_TAG); } @Test @@ -125,7 +124,7 @@ public void shouldRecordMetricOfFileUploadBytes() throws ExecutionException, Int when(f.get()).thenReturn(10L); when(remoteUploadScheduler.submit(any(Callable.class))).thenReturn(f); worker.run(); - verify(instrumentation).captureCount(FILE_UPLOAD_BYTES, localFileMetadata.getSize()); + verify(firehoseInstrumentation).captureCount(FILE_UPLOAD_BYTES, localFileMetadata.getSize()); } @Test @@ -138,7 +137,7 @@ public void shouldRecordMetricOfUploadDuration() throws ExecutionException, Inte when(f.get()).thenReturn(totalTime); worker.run(); - verify(instrumentation, (times(1))).captureDuration(FILE_UPLOAD_TIME_MILLISECONDS, totalTime); + verify(firehoseInstrumentation, (times(1))).captureDuration(FILE_UPLOAD_TIME_MILLISECONDS, totalTime); } @Test @@ -156,7 +155,7 @@ public void shouldRecordMetricOfUploadFailedCountWhenUploadFutureThrowsInterrupt } catch (RuntimeException ignored) { } - verify(instrumentation, times(1)).incrementCounter(FILE_UPLOAD_TOTAL, + verify(firehoseInstrumentation, times(1)).incrementCounter(FILE_UPLOAD_TOTAL, FAILURE_TAG, tag(BLOB_STORAGE_ERROR_TYPE_TAG, "")); } diff --git a/src/test/java/io/odpf/firehose/sink/blob/writer/remote/BlobStorageWriterFutureHandlerTest.java b/src/test/java/io/odpf/firehose/sink/blob/writer/remote/BlobStorageWriterFutureHandlerTest.java index 8fb1f6ed5..fb10648e5 100644 --- a/src/test/java/io/odpf/firehose/sink/blob/writer/remote/BlobStorageWriterFutureHandlerTest.java +++ b/src/test/java/io/odpf/firehose/sink/blob/writer/remote/BlobStorageWriterFutureHandlerTest.java @@ -1,11 +1,12 @@ package io.odpf.firehose.sink.blob.writer.remote; -import io.odpf.firehose.metrics.Instrumentation; +import 
io.odpf.firehose.metrics.FirehoseInstrumentation; import io.odpf.firehose.metrics.Metrics; import io.odpf.firehose.metrics.BlobStorageMetrics; import io.odpf.firehose.sink.blob.writer.local.LocalFileMetadata; import org.junit.Assert; import org.junit.Test; +import org.junit.jupiter.api.Assertions; import org.mockito.Mockito; import java.io.IOException; @@ -18,8 +19,8 @@ public class BlobStorageWriterFutureHandlerTest { public void shouldNotFilterUnfinishedFuture() { Future future = Mockito.mock(Future.class); LocalFileMetadata localFileMetadata = Mockito.mock(LocalFileMetadata.class); - Instrumentation instrumentation = Mockito.mock(Instrumentation.class); - BlobStorageWriterFutureHandler handler = new BlobStorageWriterFutureHandler(future, localFileMetadata, instrumentation); + FirehoseInstrumentation firehoseInstrumentation = Mockito.mock(FirehoseInstrumentation.class); + BlobStorageWriterFutureHandler handler = new BlobStorageWriterFutureHandler(future, localFileMetadata, firehoseInstrumentation); Mockito.when(future.isDone()).thenReturn(false); Assert.assertFalse(handler.isFinished()); } @@ -28,29 +29,29 @@ public void shouldNotFilterUnfinishedFuture() { public void shouldFilterFinishedFuture() throws Exception { Future future = Mockito.mock(Future.class); LocalFileMetadata localFileMetadata = Mockito.mock(LocalFileMetadata.class); - Instrumentation instrumentation = Mockito.mock(Instrumentation.class); - BlobStorageWriterFutureHandler handler = new BlobStorageWriterFutureHandler(future, localFileMetadata, instrumentation); + FirehoseInstrumentation firehoseInstrumentation = Mockito.mock(FirehoseInstrumentation.class); + BlobStorageWriterFutureHandler handler = new BlobStorageWriterFutureHandler(future, localFileMetadata, firehoseInstrumentation); Mockito.when(future.isDone()).thenReturn(true); Mockito.when(future.get()).thenReturn(1000L); Mockito.when(localFileMetadata.getFullPath()).thenReturn("/tmp/test"); Mockito.when(localFileMetadata.getSize()).thenReturn(1024L); Assert.assertTrue(handler.isFinished()); - Mockito.verify(instrumentation, Mockito.times(1)).logInfo("Flushed to blob storage {}", "/tmp/test"); - Mockito.verify(instrumentation, Mockito.times(1)).incrementCounter(BlobStorageMetrics.FILE_UPLOAD_TOTAL, Metrics.SUCCESS_TAG); - Mockito.verify(instrumentation, Mockito.times(1)).captureCount(BlobStorageMetrics.FILE_UPLOAD_BYTES, 1024L); - Mockito.verify(instrumentation, Mockito.times(1)).captureDuration(BlobStorageMetrics.FILE_UPLOAD_TIME_MILLISECONDS, 1000L); + Mockito.verify(firehoseInstrumentation, Mockito.times(1)).logInfo("Flushed to blob storage {}", "/tmp/test"); + Mockito.verify(firehoseInstrumentation, Mockito.times(1)).incrementCounter(BlobStorageMetrics.FILE_UPLOAD_TOTAL, Metrics.SUCCESS_TAG); + Mockito.verify(firehoseInstrumentation, Mockito.times(1)).captureCount(BlobStorageMetrics.FILE_UPLOAD_BYTES, 1024L); + Mockito.verify(firehoseInstrumentation, Mockito.times(1)).captureDuration(BlobStorageMetrics.FILE_UPLOAD_TIME_MILLISECONDS, 1000L); } @Test public void shouldThrowException() throws Exception { Future future = Mockito.mock(Future.class); LocalFileMetadata localFileMetadata = Mockito.mock(LocalFileMetadata.class); - Instrumentation instrumentation = Mockito.mock(Instrumentation.class); - BlobStorageWriterFutureHandler handler = new BlobStorageWriterFutureHandler(future, localFileMetadata, instrumentation); + FirehoseInstrumentation firehoseInstrumentation = Mockito.mock(FirehoseInstrumentation.class); + BlobStorageWriterFutureHandler handler = new 
BlobStorageWriterFutureHandler(future, localFileMetadata, firehoseInstrumentation); Mockito.when(future.isDone()).thenReturn(true); Mockito.when(future.get()).thenThrow(new ExecutionException(new IOException())); Mockito.when(localFileMetadata.getFullPath()).thenReturn("/tmp/test"); Mockito.when(localFileMetadata.getSize()).thenReturn(1024L); - Assert.assertThrows(BlobStorageFailedException.class, () -> handler.isFinished()); + Assertions.assertThrows(BlobStorageFailedException.class, () -> handler.isFinished()); } } diff --git a/src/test/java/io/odpf/firehose/sink/log/KeyOrMessageParserTest.java b/src/test/java/io/odpf/firehose/sink/common/KeyOrMessageParserTest.java similarity index 95% rename from src/test/java/io/odpf/firehose/sink/log/KeyOrMessageParserTest.java rename to src/test/java/io/odpf/firehose/sink/common/KeyOrMessageParserTest.java index 90835c732..556ca6e00 100644 --- a/src/test/java/io/odpf/firehose/sink/log/KeyOrMessageParserTest.java +++ b/src/test/java/io/odpf/firehose/sink/common/KeyOrMessageParserTest.java @@ -1,4 +1,4 @@ -package io.odpf.firehose.sink.log; +package io.odpf.firehose.sink.common; import io.odpf.firehose.config.AppConfig; @@ -11,7 +11,7 @@ import org.junit.runner.RunWith; import org.mockito.Mock; import org.mockito.Mockito; -import org.mockito.runners.MockitoJUnitRunner; +import org.mockito.junit.MockitoJUnitRunner; import java.io.IOException; diff --git a/src/test/java/io/odpf/firehose/sink/dlq/KafkaDlqWriterTest.java b/src/test/java/io/odpf/firehose/sink/dlq/KafkaDlqWriterTest.java index 7905e0a78..0ac910182 100644 --- a/src/test/java/io/odpf/firehose/sink/dlq/KafkaDlqWriterTest.java +++ b/src/test/java/io/odpf/firehose/sink/dlq/KafkaDlqWriterTest.java @@ -4,7 +4,7 @@ import io.odpf.firehose.consumer.TestKey; import io.odpf.firehose.consumer.TestMessage; import io.odpf.firehose.exception.DeserializerException; -import io.odpf.firehose.metrics.Instrumentation; +import io.odpf.firehose.metrics.FirehoseInstrumentation; import io.odpf.firehose.sink.dlq.kafka.KafkaDlqWriter; import org.apache.kafka.clients.producer.Callback; import org.apache.kafka.clients.producer.KafkaProducer; @@ -17,7 +17,8 @@ import org.junit.runner.RunWith; import org.mockito.ArgumentCaptor; import org.mockito.Mock; -import org.mockito.runners.MockitoJUnitRunner; +import org.mockito.Mockito; +import org.mockito.junit.MockitoJUnitRunner; import java.io.IOException; import java.util.ArrayList; @@ -27,7 +28,6 @@ import static org.junit.Assert.assertEquals; import static org.junit.Assert.assertTrue; -import static org.mockito.ArgumentMatchers.any; import static org.mockito.Mockito.*; @RunWith(MockitoJUnitRunner.class) @@ -40,13 +40,13 @@ public class KafkaDlqWriterTest { private KafkaProducer kafkaProducer; @Mock - private Instrumentation instrumentation; + private FirehoseInstrumentation firehoseInstrumentation; private KafkaDlqWriter kafkaDlqWriter; @Before public void setUp() throws Exception { - kafkaDlqWriter = new KafkaDlqWriter(kafkaProducer, "test-topic", instrumentation); + kafkaDlqWriter = new KafkaDlqWriter(kafkaProducer, "test-topic", firehoseInstrumentation); } @Test @@ -54,7 +54,7 @@ public void shouldReturnEmptyListWhenWriteEmptyMessages() throws IOException { ArrayList messages = new ArrayList<>(); List messageList = kafkaDlqWriter.write(messages); - verifyZeroInteractions(kafkaProducer); + Mockito.verifyNoInteractions(kafkaProducer); assertTrue(messageList.isEmpty()); } @@ -180,8 +180,8 @@ public void shouldRecordMessagesToBeSendToKafkaRetryQueue() throws Exception { 
calls.get(0).onCompletion(null, null); calls.get(1).onCompletion(null, null); completedLatch.await(); - verify(instrumentation, times(1)).logInfo("Pushing {} messages to retry queue topic : {}", 2, "test-topic"); - verify(instrumentation, times(1)).logInfo("Successfully pushed {} messages to {}", 2, "test-topic"); + verify(firehoseInstrumentation, times(1)).logInfo("Pushing {} messages to retry queue topic : {}", 2, "test-topic"); + verify(firehoseInstrumentation, times(1)).logInfo("Successfully pushed {} messages to {}", 2, "test-topic"); } @Test diff --git a/src/test/java/io/odpf/firehose/sink/dlq/blobstorage/BlobStorageDlqWriterTest.java b/src/test/java/io/odpf/firehose/sink/dlq/blobstorage/BlobStorageDlqWriterTest.java index 0425931f6..a9e0637d7 100644 --- a/src/test/java/io/odpf/firehose/sink/dlq/blobstorage/BlobStorageDlqWriterTest.java +++ b/src/test/java/io/odpf/firehose/sink/dlq/blobstorage/BlobStorageDlqWriterTest.java @@ -1,7 +1,7 @@ package io.odpf.firehose.sink.dlq.blobstorage; -import io.odpf.firehose.error.ErrorInfo; -import io.odpf.firehose.error.ErrorType; +import io.odpf.depot.error.ErrorInfo; +import io.odpf.depot.error.ErrorType; import io.odpf.firehose.message.Message; import io.odpf.firehose.exception.DeserializerException; import io.odpf.firehose.sink.common.blobstorage.BlobStorageException; @@ -11,7 +11,7 @@ import org.junit.Test; import org.junit.runner.RunWith; import org.mockito.Mock; -import org.mockito.runners.MockitoJUnitRunner; +import org.mockito.junit.MockitoJUnitRunner; import java.io.IOException; import java.time.Instant; @@ -20,10 +20,7 @@ import java.util.Comparator; import java.util.List; -import static org.mockito.ArgumentMatchers.any; -import static org.mockito.ArgumentMatchers.anyString; -import static org.mockito.ArgumentMatchers.contains; -import static org.mockito.ArgumentMatchers.eq; +import static org.mockito.Mockito.*; import static org.mockito.Mockito.doThrow; import static org.mockito.Mockito.verify; diff --git a/src/test/java/io/odpf/firehose/sink/elasticsearch/EsSinkFactoryTest.java b/src/test/java/io/odpf/firehose/sink/elasticsearch/EsSinkFactoryTest.java index 4feec4b84..9ded05d7c 100644 --- a/src/test/java/io/odpf/firehose/sink/elasticsearch/EsSinkFactoryTest.java +++ b/src/test/java/io/odpf/firehose/sink/elasticsearch/EsSinkFactoryTest.java @@ -1,8 +1,8 @@ package io.odpf.firehose.sink.elasticsearch; -import io.odpf.firehose.metrics.Instrumentation; -import io.odpf.firehose.metrics.StatsDReporter; +import io.odpf.depot.metrics.StatsDReporter; +import io.odpf.firehose.metrics.FirehoseInstrumentation; import io.odpf.firehose.sink.Sink; import io.odpf.stencil.client.StencilClient; import org.apache.http.HttpHost; @@ -25,7 +25,7 @@ public class EsSinkFactoryTest { private StatsDReporter statsDReporter; @Mock - private Instrumentation instrumentation; + private FirehoseInstrumentation firehoseInstrumentation; @Mock private StencilClient stencilClient; @@ -46,7 +46,7 @@ public void shouldCreateESSink() { @Test public void shouldThrowIllegalArgumentExceptionForEmptyESConnectionURLs() { try { - EsSinkFactory.getHttpHosts("", instrumentation); + EsSinkFactory.getHttpHosts("", firehoseInstrumentation); } catch (Exception e) { assertEquals(IllegalArgumentException.class, e.getClass()); assertEquals("SINK_ES_CONNECTION_URLS is empty or null", e.getMessage()); @@ -56,7 +56,7 @@ public void shouldThrowIllegalArgumentExceptionForEmptyESConnectionURLs() { @Test public void shouldThrowIllegalArgumentExceptionForNullESConnectionURLs() { try { - 
EsSinkFactory.getHttpHosts(null, instrumentation); + EsSinkFactory.getHttpHosts(null, firehoseInstrumentation); } catch (Exception e) { assertEquals(IllegalArgumentException.class, e.getClass()); assertEquals("SINK_ES_CONNECTION_URLS is empty or null", e.getMessage()); @@ -67,7 +67,7 @@ public void shouldThrowIllegalArgumentExceptionForNullESConnectionURLs() { public void shouldThrowIllegalArgumentExceptionForEmptyHostName() { String esConnectionURLs = ":1000"; try { - EsSinkFactory.getHttpHosts(esConnectionURLs, instrumentation); + EsSinkFactory.getHttpHosts(esConnectionURLs, firehoseInstrumentation); } catch (Exception e) { assertEquals(IllegalArgumentException.class, e.getClass()); } @@ -77,7 +77,7 @@ public void shouldThrowIllegalArgumentExceptionForEmptyHostName() { public void shouldThrowIllegalArgumentExceptionForEmptyPort() { String esConnectionURLs = "localhost:"; try { - EsSinkFactory.getHttpHosts(esConnectionURLs, instrumentation); + EsSinkFactory.getHttpHosts(esConnectionURLs, firehoseInstrumentation); } catch (Exception e) { assertEquals(IllegalArgumentException.class, e.getClass()); } @@ -86,7 +86,7 @@ public void shouldThrowIllegalArgumentExceptionForEmptyPort() { @Test public void shouldGetHttpHostsForValidESConnectionURLs() { String esConnectionURLs = "localhost_1:1000,localhost_2:1000"; - HttpHost[] httpHosts = EsSinkFactory.getHttpHosts(esConnectionURLs, instrumentation); + HttpHost[] httpHosts = EsSinkFactory.getHttpHosts(esConnectionURLs, firehoseInstrumentation); assertEquals("localhost_1", httpHosts[0].getHostName()); assertEquals(1000, httpHosts[0].getPort()); @@ -97,7 +97,7 @@ public void shouldGetHttpHostsForValidESConnectionURLs() { @Test public void shouldGetHttpHostsForValidESConnectionURLsWithSpacesInBetween() { String esConnectionURLs = " localhost_1: 1000, localhost_2:1000"; - HttpHost[] httpHosts = EsSinkFactory.getHttpHosts(esConnectionURLs, instrumentation); + HttpHost[] httpHosts = EsSinkFactory.getHttpHosts(esConnectionURLs, firehoseInstrumentation); assertEquals("localhost_1", httpHosts[0].getHostName()); assertEquals(1000, httpHosts[0].getPort()); @@ -108,7 +108,7 @@ public void shouldGetHttpHostsForValidESConnectionURLsWithSpacesInBetween() { @Test public void shouldGetHttpHostsForIPInESConnectionURLs() { String esConnectionURLs = "172.28.32.156:1000"; - HttpHost[] httpHosts = EsSinkFactory.getHttpHosts(esConnectionURLs, instrumentation); + HttpHost[] httpHosts = EsSinkFactory.getHttpHosts(esConnectionURLs, firehoseInstrumentation); assertEquals("172.28.32.156", httpHosts[0].getHostName()); assertEquals(1000, httpHosts[0].getPort()); @@ -118,7 +118,7 @@ public void shouldGetHttpHostsForIPInESConnectionURLs() { public void shouldThrowExceptionIfHostAndPortNotProvidedProperly() { String esConnectionURLs = "test"; try { - EsSinkFactory.getHttpHosts(esConnectionURLs, instrumentation); + EsSinkFactory.getHttpHosts(esConnectionURLs, firehoseInstrumentation); } catch (Exception e) { assertEquals(IllegalArgumentException.class, e.getClass()); assertEquals("SINK_ES_CONNECTION_URLS should contain host and port both", e.getMessage()); diff --git a/src/test/java/io/odpf/firehose/sink/elasticsearch/EsSinkTest.java b/src/test/java/io/odpf/firehose/sink/elasticsearch/EsSinkTest.java index 16c6bc9b9..e2b68bbe7 100644 --- a/src/test/java/io/odpf/firehose/sink/elasticsearch/EsSinkTest.java +++ b/src/test/java/io/odpf/firehose/sink/elasticsearch/EsSinkTest.java @@ -3,7 +3,7 @@ import io.odpf.firehose.config.enums.SinkType; import io.odpf.firehose.message.Message; 
import io.odpf.firehose.exception.NeedToRetry; -import io.odpf.firehose.metrics.Instrumentation; +import io.odpf.firehose.metrics.FirehoseInstrumentation; import io.odpf.firehose.sink.elasticsearch.request.EsRequestHandler; import org.elasticsearch.action.DocWriteRequest; import org.elasticsearch.action.DocWriteResponse; @@ -30,7 +30,7 @@ public class EsSinkTest { @Mock - private Instrumentation instrumentation; + private FirehoseInstrumentation firehoseInstrumentation; @Mock private RestHighLevelClient client; @Mock @@ -68,7 +68,7 @@ public void setUp() { @Test public void shouldGetRequestForEachMessageInEsbMessagesList() { - EsSink esSink = new EsSink(instrumentation, SinkType.ELASTICSEARCH.name(), client, esRequestHandler, 5000, 1, esRetryStatusCodeBlacklist); + EsSink esSink = new EsSink(firehoseInstrumentation, SinkType.ELASTICSEARCH.name(), client, esRequestHandler, 5000, 1, esRetryStatusCodeBlacklist); esSink.prepare(messages); verify(esRequestHandler, times(1)).getRequest(messages.get(0)); @@ -78,7 +78,7 @@ public void shouldGetRequestForEachMessageInEsbMessagesList() { @Test public void shouldReturnEmptyArrayListWhenBulkResponseExecutedSuccessfully() throws IOException { when(bulkResponse.hasFailures()).thenReturn(false); - EsSinkMock esSinkMock = new EsSinkMock(instrumentation, SinkType.ELASTICSEARCH.name(), client, esRequestHandler, + EsSinkMock esSinkMock = new EsSinkMock(firehoseInstrumentation, SinkType.ELASTICSEARCH.name(), client, esRequestHandler, 5000, 1, esRetryStatusCodeBlacklist); esSinkMock.setBulkResponse(bulkResponse); @@ -94,7 +94,7 @@ public void shouldThrowNeedToRetryExceptionWhenBulkResponseHasFailuresExceptMent BulkItemResponse[] bulkItemResponses = {bulkResponseItemMock1, bulkResponseItemMock2}; when(bulkResponse.hasFailures()).thenReturn(true); when(bulkResponse.getItems()).thenReturn(bulkItemResponses); - EsSinkMock esSinkMock = new EsSinkMock(instrumentation, SinkType.ELASTICSEARCH.name(), client, esRequestHandler, + EsSinkMock esSinkMock = new EsSinkMock(firehoseInstrumentation, SinkType.ELASTICSEARCH.name(), client, esRequestHandler, 5000, 1, esRetryStatusCodeBlacklist); esSinkMock.setBulkResponse(bulkResponse); @@ -114,7 +114,7 @@ public void shouldReturnEsbMessagesListWhenBulkResponseHasFailuresAndEmptyBlackl BulkItemResponse[] bulkItemResponses = {bulkResponseItemMock1, bulkResponseItemMock2}; when(bulkResponse.hasFailures()).thenReturn(true); when(bulkResponse.getItems()).thenReturn(bulkItemResponses); - EsSinkMock esSinkMock = new EsSinkMock(instrumentation, SinkType.ELASTICSEARCH.name(), client, esRequestHandler, + EsSinkMock esSinkMock = new EsSinkMock(firehoseInstrumentation, SinkType.ELASTICSEARCH.name(), client, esRequestHandler, 5000, 1, new ArrayList<>()); esSinkMock.setBulkResponse(bulkResponse); @@ -130,7 +130,7 @@ public void shouldReturnEsbMessagesListWhenBulkResponseHasFailuresWithStatusOthe BulkItemResponse[] bulkItemResponses = {bulkResponseItemMock1, bulkResponseItemMock2}; when(bulkResponse.hasFailures()).thenReturn(true); when(bulkResponse.getItems()).thenReturn(bulkItemResponses); - EsSinkMock esSinkMock = new EsSinkMock(instrumentation, SinkType.ELASTICSEARCH.name(), client, esRequestHandler, + EsSinkMock esSinkMock = new EsSinkMock(firehoseInstrumentation, SinkType.ELASTICSEARCH.name(), client, esRequestHandler, 5000, 1, esRetryStatusCodeBlacklist); esSinkMock.setBulkResponse(bulkResponse); @@ -146,7 +146,7 @@ public void shouldReturnEmptyMessageListIfAllTheResponsesBelongToBlacklistStatus BulkItemResponse[] bulkItemResponses = 
{bulkResponseItemMock1, bulkResponseItemMock2}; when(bulkResponse.hasFailures()).thenReturn(true); when(bulkResponse.getItems()).thenReturn(bulkItemResponses); - EsSinkMock esSinkMock = new EsSinkMock(instrumentation, SinkType.ELASTICSEARCH.name(), client, esRequestHandler, 5000, + EsSinkMock esSinkMock = new EsSinkMock(firehoseInstrumentation, SinkType.ELASTICSEARCH.name(), client, esRequestHandler, 5000, 1, esRetryStatusCodeBlacklist); esSinkMock.setBulkResponse(bulkResponse); @@ -161,13 +161,13 @@ public void shouldReportTelemetryIfTheResponsesBelongToBlacklistStatusCode() thr BulkItemResponse[] bulkItemResponses = {bulkResponseItemMock1, bulkResponseItemMock2}; when(bulkResponse.hasFailures()).thenReturn(true); when(bulkResponse.getItems()).thenReturn(bulkItemResponses); - EsSinkMock esSinkMock = new EsSinkMock(instrumentation, SinkType.ELASTICSEARCH.name(), client, esRequestHandler, 5000, 1, esRetryStatusCodeBlacklist); + EsSinkMock esSinkMock = new EsSinkMock(firehoseInstrumentation, SinkType.ELASTICSEARCH.name(), client, esRequestHandler, 5000, 1, esRetryStatusCodeBlacklist); esSinkMock.setBulkResponse(bulkResponse); esSinkMock.pushMessage(this.messages); - verify(instrumentation, times(2)).logInfo("Not retrying due to response status: {} is under blacklisted status code", "404"); - verify(instrumentation, times(2)).logInfo("Message dropped because of status code: 404"); - verify(instrumentation, times(2)).incrementCounter("firehose_sink_messages_drop_total", "cause=NOT_FOUND"); + verify(firehoseInstrumentation, times(2)).logInfo("Not retrying due to response status: {} is under blacklisted status code", "404"); + verify(firehoseInstrumentation, times(2)).logInfo("Message dropped because of status code: 404"); + verify(firehoseInstrumentation, times(2)).incrementCounter("firehose_sink_messages_drop_total", "cause=NOT_FOUND"); } @Test @@ -178,14 +178,14 @@ public void shouldThrowNeedToRetryExceptionIfSomeOfTheFailuresDontBelongToBlackl BulkItemResponse[] bulkItemResponses = {bulkResponseItemMock1, bulkResponseItemMock2, bulkResponseItemMock3}; when(bulkResponse.hasFailures()).thenReturn(true); when(bulkResponse.getItems()).thenReturn(bulkItemResponses); - EsSinkMock esSinkMock = new EsSinkMock(instrumentation, SinkType.ELASTICSEARCH.name(), client, esRequestHandler, + EsSinkMock esSinkMock = new EsSinkMock(firehoseInstrumentation, SinkType.ELASTICSEARCH.name(), client, esRequestHandler, 5000, 1, esRetryStatusCodeBlacklist); esSinkMock.setBulkResponse(bulkResponse); String logMessage = "CgYIyOm+xgUSBgiE6r7GBRgNIICAgIDA9/y0LigCMAM\u003d"; Message messageWithProto = new Message(null, Base64.getDecoder().decode(logMessage.getBytes()), "sample-topic", 0, 100); messages.add(messageWithProto); List failedMessages = esSinkMock.pushMessage(this.messages); - verify(instrumentation, times(2)).incrementCounter(any(String.class), any(String.class)); + verify(firehoseInstrumentation, times(2)).incrementCounter(any(String.class), any(String.class)); Assert.assertEquals(3, failedMessages.size()); } @@ -196,13 +196,13 @@ public void shouldLogBulkRequestFailedWhenBulkResponsesHasFailures() { BulkItemResponse[] bulkItemResponses = {bulkResponseItemMock1, bulkResponseItemMock2}; when(bulkResponse.hasFailures()).thenReturn(true); when(bulkResponse.getItems()).thenReturn(bulkItemResponses); - EsSinkMock esSinkMock = new EsSinkMock(instrumentation, SinkType.ELASTICSEARCH.name(), client, esRequestHandler, + EsSinkMock esSinkMock = new EsSinkMock(firehoseInstrumentation, SinkType.ELASTICSEARCH.name(), 
client, esRequestHandler, 5000, 1, esRetryStatusCodeBlacklist); esSinkMock.setBulkResponse(bulkResponse); esSinkMock.pushMessage(this.messages); - verify(instrumentation, times(1)).logWarn("Bulk request failed"); - verify(instrumentation, times(1)).logWarn("Bulk request failed count: {}", 2); + verify(firehoseInstrumentation, times(1)).logWarn("Bulk request failed"); + verify(firehoseInstrumentation, times(1)).logWarn("Bulk request failed count: {}", 2); } @Test @@ -212,22 +212,22 @@ public void shouldNotLogBulkRequestFailedWhenBulkResponsesHasNotFailures() { BulkItemResponse[] bulkItemResponses = {bulkResponseItemMock1, bulkResponseItemMock2}; when(bulkResponse.hasFailures()).thenReturn(false); when(bulkResponse.getItems()).thenReturn(bulkItemResponses); - EsSinkMock esSinkMock = new EsSinkMock(instrumentation, SinkType.ELASTICSEARCH.name(), client, esRequestHandler, + EsSinkMock esSinkMock = new EsSinkMock(firehoseInstrumentation, SinkType.ELASTICSEARCH.name(), client, esRequestHandler, 5000, 1, esRetryStatusCodeBlacklist); esSinkMock.setBulkResponse(bulkResponse); esSinkMock.pushMessage(this.messages); - verify(instrumentation, times(0)).logWarn("Bulk request failed"); - verify(instrumentation, times(0)).logWarn("Bulk request failed count: {}", 2); + verify(firehoseInstrumentation, times(0)).logWarn("Bulk request failed"); + verify(firehoseInstrumentation, times(0)).logWarn("Bulk request failed count: {}", 2); } public static class EsSinkMock extends EsSink { private BulkResponse bulkResponse; - public EsSinkMock(Instrumentation instrumentation, String sinkType, RestHighLevelClient client, EsRequestHandler esRequestHandler, + public EsSinkMock(FirehoseInstrumentation firehoseInstrumentation, String sinkType, RestHighLevelClient client, EsRequestHandler esRequestHandler, long esRequestTimeoutInMs, Integer esWaitForActiveShardsCount, List esRetryStatusCodeBlacklist) { - super(instrumentation, sinkType, client, esRequestHandler, esRequestTimeoutInMs, esWaitForActiveShardsCount, esRetryStatusCodeBlacklist); + super(firehoseInstrumentation, sinkType, client, esRequestHandler, esRequestTimeoutInMs, esWaitForActiveShardsCount, esRetryStatusCodeBlacklist); } public void setBulkResponse(BulkResponse bulkResponse) { diff --git a/src/test/java/io/odpf/firehose/sink/elasticsearch/request/ESUpdateRequestHandlerTest.java b/src/test/java/io/odpf/firehose/sink/elasticsearch/request/ESUpdateRequestHandlerTest.java index 3898b270e..8e165e976 100644 --- a/src/test/java/io/odpf/firehose/sink/elasticsearch/request/ESUpdateRequestHandlerTest.java +++ b/src/test/java/io/odpf/firehose/sink/elasticsearch/request/ESUpdateRequestHandlerTest.java @@ -1,7 +1,6 @@ package io.odpf.firehose.sink.elasticsearch.request; - import io.odpf.firehose.config.enums.EsSinkMessageType; import io.odpf.firehose.config.enums.EsSinkRequestType; import io.odpf.firehose.message.Message; @@ -12,26 +11,23 @@ import com.google.gson.reflect.TypeToken; import com.google.protobuf.InvalidProtocolBufferException; +import io.odpf.stencil.StencilClientFactory; import io.odpf.stencil.client.StencilClient; import org.elasticsearch.action.DocWriteRequest; import org.elasticsearch.action.update.UpdateRequest; import org.elasticsearch.common.xcontent.XContentType; import org.junit.Before; import org.junit.Test; -import org.mockito.Mock; -import org.mockito.Mockito; import java.util.Base64; import java.util.HashMap; import static org.junit.Assert.*; -import static org.mockito.Mockito.when; import static org.mockito.MockitoAnnotations.initMocks; 
public class ESUpdateRequestHandlerTest { - @Mock - private StencilClient stencilClient; + private final StencilClient stencilClient = StencilClientFactory.getClient(); private MessageToJson jsonSerializer; private Message messageWithJSON; @@ -47,10 +43,7 @@ public void setUp() throws InvalidProtocolBufferException { logMessage = "CgYIyOm+xgUSBgiE6r7GBRgNIICAgIDA9/y0LigCMAM\u003d"; messageWithProto = new Message(null, Base64.getDecoder().decode(logMessage.getBytes()), "sample-topic", 0, 100); - when(stencilClient.parse(Mockito.anyString(), Mockito.any())).thenCallRealMethod(); - when(stencilClient.getParser(Mockito.anyString())).thenCallRealMethod(); String protoClassName = TestAggregatedSupplyMessage.class.getName(); - when(stencilClient.get(protoClassName)).thenReturn(TestAggregatedSupplyMessage.getDescriptor()); jsonSerializer = new MessageToJson(stencilClient.getParser(protoClassName), true, false); } diff --git a/src/test/java/io/odpf/firehose/sink/elasticsearch/request/ESUpsertRequestHandlerTest.java b/src/test/java/io/odpf/firehose/sink/elasticsearch/request/ESUpsertRequestHandlerTest.java index b2d6dbc40..7d25a8b65 100644 --- a/src/test/java/io/odpf/firehose/sink/elasticsearch/request/ESUpsertRequestHandlerTest.java +++ b/src/test/java/io/odpf/firehose/sink/elasticsearch/request/ESUpsertRequestHandlerTest.java @@ -12,26 +12,23 @@ import com.google.gson.reflect.TypeToken; import com.google.protobuf.InvalidProtocolBufferException; +import io.odpf.stencil.StencilClientFactory; import io.odpf.stencil.client.StencilClient; import org.elasticsearch.action.DocWriteRequest; import org.elasticsearch.action.index.IndexRequest; import org.elasticsearch.common.xcontent.XContentType; import org.junit.Before; import org.junit.Test; -import org.mockito.Mock; -import org.mockito.Mockito; import java.util.Base64; import java.util.HashMap; import static org.junit.Assert.*; -import static org.mockito.Mockito.when; import static org.mockito.MockitoAnnotations.initMocks; public class ESUpsertRequestHandlerTest { - @Mock - private StencilClient stencilClient; + private final StencilClient stencilClient = StencilClientFactory.getClient(); private MessageToJson jsonSerializer; private Message messageWithJSON; @@ -47,10 +44,7 @@ public void setUp() throws InvalidProtocolBufferException { logMessage = "CgYIyOm+xgUSBgiE6r7GBRgNIICAgIDA9/y0LigCMAM\u003d"; messageWithProto = new Message(null, Base64.getDecoder().decode(logMessage.getBytes()), "sample-topic", 0, 100); - when(stencilClient.parse(Mockito.anyString(), Mockito.any())).thenCallRealMethod(); - when(stencilClient.getParser(Mockito.anyString())).thenCallRealMethod(); String protoClassName = TestAggregatedSupplyMessage.class.getName(); - when(stencilClient.get(protoClassName)).thenReturn(TestAggregatedSupplyMessage.getDescriptor()); jsonSerializer = new MessageToJson(stencilClient.getParser(protoClassName), true, false); } diff --git a/src/test/java/io/odpf/firehose/sink/elasticsearch/request/EsRequestHandlerFactoryTest.java b/src/test/java/io/odpf/firehose/sink/elasticsearch/request/EsRequestHandlerFactoryTest.java index 509387991..9822d89cd 100644 --- a/src/test/java/io/odpf/firehose/sink/elasticsearch/request/EsRequestHandlerFactoryTest.java +++ b/src/test/java/io/odpf/firehose/sink/elasticsearch/request/EsRequestHandlerFactoryTest.java @@ -2,7 +2,7 @@ import io.odpf.firehose.config.EsSinkConfig; import io.odpf.firehose.config.enums.EsSinkMessageType; -import io.odpf.firehose.metrics.Instrumentation; +import 
io.odpf.firehose.metrics.FirehoseInstrumentation; import io.odpf.firehose.serializer.MessageToJson; import io.odpf.firehose.config.enums.EsSinkRequestType; import org.junit.Before; @@ -19,7 +19,7 @@ public class EsRequestHandlerFactoryTest { private EsSinkConfig esSinkConfig; @Mock - private Instrumentation instrumentation; + private FirehoseInstrumentation firehoseInstrumentation; private MessageToJson jsonSerializer; @@ -31,22 +31,22 @@ public void setUp() throws Exception { @Test public void shouldReturnInsertRequestHandler() { when(esSinkConfig.isSinkEsModeUpdateOnlyEnable()).thenReturn(false); - EsRequestHandlerFactory esRequestHandlerFactory = new EsRequestHandlerFactory(esSinkConfig, instrumentation, "id", + EsRequestHandlerFactory esRequestHandlerFactory = new EsRequestHandlerFactory(esSinkConfig, firehoseInstrumentation, "id", EsSinkMessageType.JSON, jsonSerializer, "customer_id", "booking", "order_number"); EsRequestHandler requestHandler = esRequestHandlerFactory.getRequestHandler(); - verify(instrumentation, times(1)).logInfo("ES request mode: {}", EsSinkRequestType.INSERT_OR_UPDATE); + verify(firehoseInstrumentation, times(1)).logInfo("ES request mode: {}", EsSinkRequestType.INSERT_OR_UPDATE); assertEquals(EsUpsertRequestHandler.class, requestHandler.getClass()); } @Test public void shouldReturnUpdateRequestHandler() { when(esSinkConfig.isSinkEsModeUpdateOnlyEnable()).thenReturn(true); - EsRequestHandlerFactory esRequestHandlerFactory = new EsRequestHandlerFactory(esSinkConfig, instrumentation, "id", + EsRequestHandlerFactory esRequestHandlerFactory = new EsRequestHandlerFactory(esSinkConfig, firehoseInstrumentation, "id", EsSinkMessageType.JSON, jsonSerializer, "customer_id", "booking", "order_number"); EsRequestHandler requestHandler = esRequestHandlerFactory.getRequestHandler(); - verify(instrumentation, times(1)).logInfo("ES request mode: {}", EsSinkRequestType.UPDATE_ONLY); + verify(firehoseInstrumentation, times(1)).logInfo("ES request mode: {}", EsSinkRequestType.UPDATE_ONLY); assertEquals(EsUpdateRequestHandler.class, requestHandler.getClass()); } } diff --git a/src/test/java/io/odpf/firehose/sink/grpc/GrpcClientTest.java b/src/test/java/io/odpf/firehose/sink/grpc/GrpcClientTest.java index f85f9595d..291d0b6b7 100644 --- a/src/test/java/io/odpf/firehose/sink/grpc/GrpcClientTest.java +++ b/src/test/java/io/odpf/firehose/sink/grpc/GrpcClientTest.java @@ -1,14 +1,11 @@ package io.odpf.firehose.sink.grpc; - - - import io.odpf.firehose.config.GrpcSinkConfig; import io.odpf.firehose.consumer.Error; import io.odpf.firehose.consumer.TestGrpcRequest; import io.odpf.firehose.consumer.TestGrpcResponse; import io.odpf.firehose.consumer.TestServerGrpc; -import io.odpf.firehose.metrics.Instrumentation; +import io.odpf.firehose.metrics.FirehoseInstrumentation; import io.odpf.firehose.sink.grpc.client.GrpcClient; import com.google.protobuf.AbstractMessage; import com.google.protobuf.DynamicMessage; @@ -22,8 +19,7 @@ import org.junit.After; import org.junit.Before; import org.junit.Test; -import org.mockito.Mock; -import org.mockito.MockitoAnnotations; +import org.mockito.Mockito; import org.mockito.stubbing.Stubber; import java.io.IOException; @@ -36,7 +32,6 @@ import static org.junit.Assert.assertEquals; import static org.junit.Assert.assertFalse; import static org.junit.Assert.assertTrue; -import static org.mockito.ArgumentMatchers.any; import static org.mockito.Mockito.*; public class GrpcClientTest { @@ -44,21 +39,14 @@ public class GrpcClientTest { private Server server; private 
GrpcClient grpcClient; private TestServerGrpc.TestServerImplBase testGrpcService; - private GrpcSinkConfig grpcSinkConfig; private RecordHeaders headers; private static final List HEADER_KEYS = Arrays.asList("test-header-key-1", "test-header-key-2"); private HeaderTestInterceptor headerTestInterceptor; - private StencilClient stencilClient; - private ManagedChannel managedChannel; - - @Mock - private Instrumentation instrumentation; @Before public void setup() throws IOException { - MockitoAnnotations.initMocks(this); - testGrpcService = mock(TestServerGrpc.TestServerImplBase.class); - when(testGrpcService.bindService()).thenCallRealMethod(); + FirehoseInstrumentation firehoseInstrumentation = Mockito.mock(FirehoseInstrumentation.class); + testGrpcService = Mockito.mock(TestServerGrpc.TestServerImplBase.class, CALLS_REAL_METHODS); headerTestInterceptor = new HeaderTestInterceptor(); headerTestInterceptor.setHeaderKeys(HEADER_KEYS); ServerServiceDefinition serviceDefinition = ServerInterceptors.intercept(testGrpcService.bindService(), Arrays.asList(headerTestInterceptor)); @@ -72,12 +60,13 @@ public void setup() throws IOException { config.put("SINK_GRPC_METHOD_URL", "io.odpf.firehose.consumer.TestServer/TestRpcMethod"); config.put("SINK_GRPC_RESPONSE_SCHEMA_PROTO_CLASS", "io.odpf.firehose.consumer.TestGrpcResponse"); - grpcSinkConfig = ConfigFactory.create(GrpcSinkConfig.class, config); - stencilClient = StencilClientFactory.getClient(); - managedChannel = ManagedChannelBuilder.forAddress(grpcSinkConfig.getSinkGrpcServiceHost(), grpcSinkConfig.getSinkGrpcServicePort()).usePlaintext().build(); - grpcClient = new GrpcClient(instrumentation, grpcSinkConfig, managedChannel, stencilClient); + GrpcSinkConfig grpcSinkConfig = ConfigFactory.create(GrpcSinkConfig.class, config); + StencilClient stencilClient = StencilClientFactory.getClient(); + ManagedChannel managedChannel = ManagedChannelBuilder.forAddress(grpcSinkConfig.getSinkGrpcServiceHost(), grpcSinkConfig.getSinkGrpcServicePort()).usePlaintext().build(); + grpcClient = new GrpcClient(firehoseInstrumentation, grpcSinkConfig, managedChannel, stencilClient); headers = new RecordHeaders(); } + @After public void tearDown() { if (server != null) { @@ -178,7 +167,6 @@ public void shouldReturnErrorWhenGrpcException() { } - private Stubber doAnswerProtoReponse(T response) { return doAnswer(invocation -> { StreamObserver responseObserver = (StreamObserver) invocation.getArguments()[1]; diff --git a/src/test/java/io/odpf/firehose/sink/grpc/GrpcSinkFactoryTest.java b/src/test/java/io/odpf/firehose/sink/grpc/GrpcSinkFactoryTest.java index 8042b3010..a2a6c5be8 100644 --- a/src/test/java/io/odpf/firehose/sink/grpc/GrpcSinkFactoryTest.java +++ b/src/test/java/io/odpf/firehose/sink/grpc/GrpcSinkFactoryTest.java @@ -1,10 +1,8 @@ package io.odpf.firehose.sink.grpc; - - +import io.odpf.depot.metrics.StatsDReporter; import io.odpf.firehose.consumer.TestServerGrpc; import io.odpf.firehose.exception.DeserializerException; -import io.odpf.firehose.metrics.StatsDReporter; import io.odpf.firehose.sink.Sink; import io.grpc.Server; import io.grpc.ServerBuilder; diff --git a/src/test/java/io/odpf/firehose/sink/grpc/GrpcSinkTest.java b/src/test/java/io/odpf/firehose/sink/grpc/GrpcSinkTest.java index dcb2a382c..5f731abf1 100644 --- a/src/test/java/io/odpf/firehose/sink/grpc/GrpcSinkTest.java +++ b/src/test/java/io/odpf/firehose/sink/grpc/GrpcSinkTest.java @@ -1,13 +1,11 @@ package io.odpf.firehose.sink.grpc; - - +import io.odpf.depot.error.ErrorInfo; +import 
io.odpf.depot.error.ErrorType; import io.odpf.firehose.message.Message; import io.odpf.firehose.consumer.TestGrpcResponse; -import io.odpf.firehose.error.ErrorInfo; -import io.odpf.firehose.error.ErrorType; import io.odpf.firehose.exception.DeserializerException; -import io.odpf.firehose.metrics.Instrumentation; +import io.odpf.firehose.metrics.FirehoseInstrumentation; import io.odpf.firehose.sink.grpc.client.GrpcClient; import com.google.protobuf.DynamicMessage; import io.odpf.stencil.client.StencilClient; @@ -22,7 +20,6 @@ import static org.junit.Assert.assertEquals; import static org.junit.Assert.assertFalse; -import static org.mockito.ArgumentMatchers.any; import static org.mockito.Mockito.*; import static org.mockito.MockitoAnnotations.initMocks; @@ -41,12 +38,12 @@ public class GrpcSinkTest { private StencilClient stencilClient; @Mock - private Instrumentation instrumentation; + private FirehoseInstrumentation firehoseInstrumentation; @Before public void setUp() { initMocks(this); - sink = new GrpcSink(instrumentation, grpcClient, stencilClient); + sink = new GrpcSink(firehoseInstrumentation, grpcClient, stencilClient); } @Test @@ -61,10 +58,10 @@ public void shouldWriteToSink() throws Exception { sink.pushMessage(Collections.singletonList(message)); verify(grpcClient, times(1)).execute(any(byte[].class), eq(headers)); - verify(instrumentation, times(1)).logInfo("Preparing {} messages", 1); - verify(instrumentation, times(1)).logDebug("Response: {}", response); - verify(instrumentation, times(0)).logWarn("Grpc Service returned error"); - verify(instrumentation, times(1)).logDebug("Failed messages count: {}", 0); + verify(firehoseInstrumentation, times(1)).logInfo("Preparing {} messages", 1); + verify(firehoseInstrumentation, times(1)).logDebug("Response: {}", response); + verify(firehoseInstrumentation, times(0)).logWarn("Grpc Service returned error"); + verify(firehoseInstrumentation, times(1)).logDebug("Failed messages count: {}", 0); } @Test @@ -80,15 +77,15 @@ public void shouldReturnBackListOfFailedMessages() throws IOException, Deseriali assertFalse(failedMessages.isEmpty()); assertEquals(1, failedMessages.size()); - verify(instrumentation, times(1)).logInfo("Preparing {} messages", 1); - verify(instrumentation, times(1)).logDebug("Response: {}", response); - verify(instrumentation, times(1)).logWarn("Grpc Service returned error"); - verify(instrumentation, times(1)).logDebug("Failed messages count: {}", 1); + verify(firehoseInstrumentation, times(1)).logInfo("Preparing {} messages", 1); + verify(firehoseInstrumentation, times(1)).logDebug("Response: {}", response); + verify(firehoseInstrumentation, times(1)).logWarn("Grpc Service returned error"); + verify(firehoseInstrumentation, times(1)).logDebug("Failed messages count: {}", 1); } @Test public void shouldCloseStencilClient() throws IOException { - sink = new GrpcSink(instrumentation, grpcClient, stencilClient); + sink = new GrpcSink(firehoseInstrumentation, grpcClient, stencilClient); sink.close(); verify(stencilClient, times(1)).close(); @@ -96,9 +93,9 @@ public void shouldCloseStencilClient() throws IOException { @Test public void shouldLogWhenClosingConnection() throws IOException { - sink = new GrpcSink(instrumentation, grpcClient, stencilClient); + sink = new GrpcSink(firehoseInstrumentation, grpcClient, stencilClient); sink.close(); - verify(instrumentation, times(1)).logInfo("GRPC connection closing"); + verify(firehoseInstrumentation, times(1)).logInfo("GRPC connection closing"); } } diff --git 
a/src/test/java/io/odpf/firehose/sink/http/HttpSinkFactoryTest.java b/src/test/java/io/odpf/firehose/sink/http/HttpSinkFactoryTest.java index 007bdc26b..898fc59cc 100644 --- a/src/test/java/io/odpf/firehose/sink/http/HttpSinkFactoryTest.java +++ b/src/test/java/io/odpf/firehose/sink/http/HttpSinkFactoryTest.java @@ -1,8 +1,8 @@ package io.odpf.firehose.sink.http; +import io.odpf.depot.metrics.StatsDReporter; import io.odpf.firehose.message.Message; -import io.odpf.firehose.metrics.StatsDReporter; import io.odpf.firehose.sink.AbstractSink; import io.odpf.stencil.client.StencilClient; import org.gradle.internal.impldep.org.junit.Before; @@ -11,7 +11,7 @@ import org.junit.Test; import org.junit.runner.RunWith; import org.mockito.Mock; -import org.mockito.runners.MockitoJUnitRunner; +import org.mockito.junit.MockitoJUnitRunner; import org.mockserver.integration.ClientAndServer; import org.mockserver.verify.VerificationTimes; diff --git a/src/test/java/io/odpf/firehose/sink/http/HttpSinkTest.java b/src/test/java/io/odpf/firehose/sink/http/HttpSinkTest.java index b8a5619ea..74333ae72 100644 --- a/src/test/java/io/odpf/firehose/sink/http/HttpSinkTest.java +++ b/src/test/java/io/odpf/firehose/sink/http/HttpSinkTest.java @@ -5,7 +5,7 @@ import io.odpf.firehose.message.Message; import io.odpf.firehose.exception.DeserializerException; import io.odpf.firehose.exception.NeedToRetry; -import io.odpf.firehose.metrics.Instrumentation; +import io.odpf.firehose.metrics.FirehoseInstrumentation; import io.odpf.firehose.sink.http.request.types.Request; import io.odpf.stencil.client.StencilClient; import org.apache.http.Header; @@ -22,7 +22,7 @@ import org.junit.Test; import org.junit.runner.RunWith; import org.mockito.Mock; -import org.mockito.runners.MockitoJUnitRunner; +import org.mockito.junit.MockitoJUnitRunner; import java.io.IOException; import java.net.URI; @@ -35,7 +35,7 @@ @RunWith(MockitoJUnitRunner.class) public class HttpSinkTest { @Mock - private Instrumentation instrumentation; + private FirehoseInstrumentation firehoseInstrumentation; @Mock private Request request; @Mock @@ -89,7 +89,7 @@ public void shouldCallHttpClientWithProperRequest() throws Exception { when(response.getEntity()).thenReturn(httpEntity, httpEntity); when(httpEntity.getContent()).thenReturn(new StringInputStream("[{\"key\":\"value1\"}, {\"key\":\"value2\"}]")); - HttpSink httpSink = new HttpSink(instrumentation, request, httpClient, stencilClient, retryStatusCodeRange, requestLogStatusCodeRanges); + HttpSink httpSink = new HttpSink(firehoseInstrumentation, request, httpClient, stencilClient, retryStatusCodeRange, requestLogStatusCodeRanges); httpSink.prepare(messages); httpSink.execute(); @@ -112,7 +112,7 @@ public void shouldThrowNeedToRetryExceptionWhenResponseCodeIsGivenRange() throws when(response.getEntity()).thenReturn(httpEntity); when(httpEntity.getContent()).thenReturn(new StringInputStream("{\"key\":\"value\"}")); - HttpSink httpSink = new HttpSink(instrumentation, request, httpClient, stencilClient, + HttpSink httpSink = new HttpSink(firehoseInstrumentation, request, httpClient, stencilClient, new RangeToHashMapConverter().convert(null, "400-505"), requestLogStatusCodeRanges); httpSink.prepare(messages); httpSink.execute(); @@ -130,7 +130,7 @@ public void shouldThrowNeedToRetryExceptionWhenResponseIsNull() throws Exception when(request.build(messages)).thenReturn(httpRequests); when(httpClient.execute(httpPut)).thenReturn(null); - HttpSink httpSink = new HttpSink(instrumentation, request, httpClient, stencilClient, 
retryStatusCodeRange, requestLogStatusCodeRanges); + HttpSink httpSink = new HttpSink(firehoseInstrumentation, request, httpClient, stencilClient, retryStatusCodeRange, requestLogStatusCodeRanges); httpSink.prepare(messages); httpSink.execute(); } @@ -149,7 +149,7 @@ public void shouldThrowNeedToRetryExceptionWhenResponseStatusCodeIsZero() throws when(response.getStatusLine()).thenReturn(statusLine); when(statusLine.getStatusCode()).thenReturn(0); - HttpSink httpSink = new HttpSink(instrumentation, request, httpClient, stencilClient, retryStatusCodeRange, requestLogStatusCodeRanges); + HttpSink httpSink = new HttpSink(firehoseInstrumentation, request, httpClient, stencilClient, retryStatusCodeRange, requestLogStatusCodeRanges); httpSink.prepare(messages); httpSink.execute(); } @@ -158,13 +158,13 @@ public void shouldThrowNeedToRetryExceptionWhenResponseStatusCodeIsZero() throws public void shouldCatchURISyntaxExceptionAndThrowIOException() throws URISyntaxException, DeserializerException, IOException { when(request.build(messages)).thenThrow(new URISyntaxException("", "")); - HttpSink httpSink = new HttpSink(instrumentation, request, httpClient, stencilClient, retryStatusCodeRange, requestLogStatusCodeRanges); + HttpSink httpSink = new HttpSink(firehoseInstrumentation, request, httpClient, stencilClient, retryStatusCodeRange, requestLogStatusCodeRanges); httpSink.prepare(messages); } @Test public void shouldCloseStencilClient() throws IOException { - HttpSink httpSink = new HttpSink(instrumentation, request, httpClient, stencilClient, retryStatusCodeRange, requestLogStatusCodeRanges); + HttpSink httpSink = new HttpSink(firehoseInstrumentation, request, httpClient, stencilClient, retryStatusCodeRange, requestLogStatusCodeRanges); httpSink.close(); verify(stencilClient, times(1)).close(); @@ -172,10 +172,10 @@ public void shouldCloseStencilClient() throws IOException { @Test public void shouldLogConnectionClosing() throws IOException { - HttpSink httpSink = new HttpSink(instrumentation, request, httpClient, stencilClient, retryStatusCodeRange, requestLogStatusCodeRanges); + HttpSink httpSink = new HttpSink(firehoseInstrumentation, request, httpClient, stencilClient, retryStatusCodeRange, requestLogStatusCodeRanges); httpSink.close(); - verify(instrumentation, times(1)).logInfo("HTTP connection closing"); + verify(firehoseInstrumentation, times(1)).logInfo("HTTP connection closing"); } @Test @@ -195,17 +195,17 @@ public void shouldLogEntireRequestIfInStatusCodeRangeWithBatchRequestAndCaptureD when(response.getAllHeaders()).thenReturn(new Header[]{new BasicHeader("Accept", "text/plain")}); when(response.getEntity()).thenReturn(httpEntity); - HttpSink httpSink = new HttpSink(instrumentation, request, httpClient, stencilClient, + HttpSink httpSink = new HttpSink(firehoseInstrumentation, request, httpClient, stencilClient, retryStatusCodeRange, new RangeToHashMapConverter().convert(null, "400-505")); httpSink.prepare(messages); httpSink.execute(); - verify(instrumentation, times(1)).logInfo( + verify(firehoseInstrumentation, times(1)).logInfo( "\nRequest Method: PUT" + "\nRequest Url: http://dummy.com" + "\nRequest Headers: [Accept: text/plain]" + "\nRequest Body: [{\"key\":\"value1\"},{\"key\":\"value2\"}]"); - verify(instrumentation, times(1)).logInfo("Message dropped because of status code: 500"); - verify(instrumentation, times(1)).captureCount("firehose_sink_messages_drop_total", 2, "cause= 500"); + verify(firehoseInstrumentation, times(1)).logInfo("Message dropped because of status code: 
500"); + verify(firehoseInstrumentation, times(1)).captureCount("firehose_sink_messages_drop_total", 2L, "cause= 500"); } @Test @@ -226,17 +226,17 @@ public void shouldLogEntireRequestIfInStatusCodeRangeWithIndividualRequestAndCap when(response.getEntity()).thenReturn(httpEntity); when(httpEntity.getContent()).thenReturn(new StringInputStream("[{\"key\":\"value\"}]")); - HttpSink httpSink = new HttpSink(instrumentation, request, httpClient, stencilClient, + HttpSink httpSink = new HttpSink(firehoseInstrumentation, request, httpClient, stencilClient, retryStatusCodeRange, new RangeToHashMapConverter().convert(null, "400-505")); httpSink.prepare(messages); httpSink.execute(); - verify(instrumentation, times(1)).logInfo( + verify(firehoseInstrumentation, times(1)).logInfo( "\nRequest Method: PUT" + "\nRequest Url: http://dummy.com" + "\nRequest Headers: [Accept: text/plain]" + "\nRequest Body: [{\"key\":\"value\"}]"); - verify(instrumentation, times(1)).logInfo("Message dropped because of status code: 500"); - verify(instrumentation, times(1)).captureCount("firehose_sink_messages_drop_total", 1, "cause= 500"); + verify(firehoseInstrumentation, times(1)).logInfo("Message dropped because of status code: 500"); + verify(firehoseInstrumentation, times(1)).captureCount("firehose_sink_messages_drop_total", 1L, "cause= 500"); } @Test @@ -257,17 +257,17 @@ public void shouldLogEntireRequestIfInStatusCodeRangeWithSingleListRequestBodyAn when(response.getEntity()).thenReturn(httpEntity); when(httpEntity.getContent()).thenReturn(new StringInputStream("[{\"key\":\"value\"}]")); - HttpSink httpSink = new HttpSink(instrumentation, request, httpClient, stencilClient, + HttpSink httpSink = new HttpSink(firehoseInstrumentation, request, httpClient, stencilClient, retryStatusCodeRange, new RangeToHashMapConverter().convert(null, "400-505")); httpSink.prepare(messages); httpSink.execute(); - verify(instrumentation, times(1)).logInfo( + verify(firehoseInstrumentation, times(1)).logInfo( "\nRequest Method: PUT" + "\nRequest Url: http://dummy.com" + "\nRequest Headers: [Accept: text/plain]" + "\nRequest Body: [{\"key\":\"value\"}]"); - verify(instrumentation, times(1)).logInfo("Message dropped because of status code: 500"); - verify(instrumentation, times(1)).captureCount("firehose_sink_messages_drop_total", 1, "cause= 500"); + verify(firehoseInstrumentation, times(1)).logInfo("Message dropped because of status code: 500"); + verify(firehoseInstrumentation, times(1)).captureCount("firehose_sink_messages_drop_total", 1L, "cause= 500"); } @Test @@ -286,11 +286,11 @@ public void shouldNotLogEntireRequestIfNotInStatusCodeRange() throws Exception { when(response.getEntity()).thenReturn(httpEntity); when(httpEntity.getContent()).thenReturn(new StringInputStream("[{\"key\":\"value1\"},{\"key\":\"value2\"}]")); - HttpSink httpSink = new HttpSink(instrumentation, request, httpClient, stencilClient, + HttpSink httpSink = new HttpSink(firehoseInstrumentation, request, httpClient, stencilClient, retryStatusCodeRange, new RangeToHashMapConverter().convert(null, "400-499")); httpSink.prepare(messages); httpSink.execute(); - verify(instrumentation, times(0)).logInfo( + verify(firehoseInstrumentation, times(0)).logInfo( "\nRequest Method: PUT" + "\nRequest Url: http://dummy.com" + "\nRequest Headers: [Accept: text/plain]" @@ -313,12 +313,12 @@ public void shouldCaptureDroppedMessagesMetricsIfNotInStatusCodeRange() throws E when(response.getEntity()).thenReturn(httpEntity); when(httpEntity.getContent()).thenReturn(new 
StringInputStream("[{\"key\":\"value1\"},{\"key\":\"value2\"}]")); - HttpSink httpSink = new HttpSink(instrumentation, request, httpClient, stencilClient, + HttpSink httpSink = new HttpSink(firehoseInstrumentation, request, httpClient, stencilClient, new RangeToHashMapConverter().convert(null, "400-499"), requestLogStatusCodeRanges); httpSink.prepare(messages); httpSink.execute(); - verify(instrumentation, times(1)).logInfo("Message dropped because of status code: 500"); - verify(instrumentation, times(1)).captureCount("firehose_sink_messages_drop_total", 2, "cause= 500"); + verify(firehoseInstrumentation, times(1)).logInfo("Message dropped because of status code: 500"); + verify(firehoseInstrumentation, times(1)).captureCount("firehose_sink_messages_drop_total", 2L, "cause= 500"); } @Test(expected = NeedToRetry.class) @@ -335,14 +335,14 @@ public void shouldNotCaptureDroppedMessagesMetricsIfInStatusCodeRange() throws E when(response.getEntity()).thenReturn(httpEntity); when(httpEntity.getContent()).thenReturn(new StringInputStream("{\"key\":\"value\"}")); - HttpSink httpSink = new HttpSink(instrumentation, request, httpClient, stencilClient, + HttpSink httpSink = new HttpSink(firehoseInstrumentation, request, httpClient, stencilClient, new RangeToHashMapConverter().convert(null, "400-600"), requestLogStatusCodeRanges); httpSink.prepare(messages); try { httpSink.execute(); } finally { - verify(instrumentation, times(0)).logInfo("Message dropped because of status code: 500"); - verify(instrumentation, times(0)).captureCount("messages.dropped.count", 1, "500"); + verify(firehoseInstrumentation, times(0)).logInfo("Message dropped because of status code: 500"); + verify(firehoseInstrumentation, times(0)).captureCount("messages.dropped.count", 1L, "500"); } } @@ -360,12 +360,12 @@ public void shouldNotCaptureDroppedMessagesMetricsIfStatusCodeIs200() throws Exc when(response.getEntity()).thenReturn(httpEntity); when(httpEntity.getContent()).thenReturn(new StringInputStream("{\"key\":\"value\"}")); - HttpSink httpSink = new HttpSink(instrumentation, request, httpClient, stencilClient, + HttpSink httpSink = new HttpSink(firehoseInstrumentation, request, httpClient, stencilClient, retryStatusCodeRange, requestLogStatusCodeRanges); httpSink.prepare(messages); httpSink.execute(); - verify(instrumentation, times(0)).logInfo("Message dropped because of status code: 500"); - verify(instrumentation, times(0)).captureCount("messages.dropped.count", 1, "200"); + verify(firehoseInstrumentation, times(0)).logInfo("Message dropped because of status code: 500"); + verify(firehoseInstrumentation, times(0)).captureCount("messages.dropped.count", 1L, "200"); } @Test @@ -382,12 +382,12 @@ public void shouldNotCaptureDroppedMessagesMetricsIfStatusCodeIs201() throws Exc when(response.getEntity()).thenReturn(httpEntity); when(httpEntity.getContent()).thenReturn(new StringInputStream("{\"key\":\"value\"}")); - HttpSink httpSink = new HttpSink(instrumentation, request, httpClient, stencilClient, + HttpSink httpSink = new HttpSink(firehoseInstrumentation, request, httpClient, stencilClient, retryStatusCodeRange, requestLogStatusCodeRanges); httpSink.prepare(messages); httpSink.execute(); - verify(instrumentation, times(0)).logInfo("Message dropped because of status code: 500"); - verify(instrumentation, times(0)).captureCount("messages.dropped.count", 1, "201"); + verify(firehoseInstrumentation, times(0)).logInfo("Message dropped because of status code: 500"); + verify(firehoseInstrumentation, 
times(0)).captureCount("messages.dropped.count", 1L, "201"); } @Test @@ -405,12 +405,12 @@ public void shouldCaptureResponseStatusCount() throws Exception { when(response.getEntity()).thenReturn(httpEntity); when(httpEntity.getContent()).thenReturn(new StringInputStream("{\"key\":\"value\"}")); - HttpSink httpSink = new HttpSink(instrumentation, request, httpClient, stencilClient, + HttpSink httpSink = new HttpSink(firehoseInstrumentation, request, httpClient, stencilClient, retryStatusCodeRange, requestLogStatusCodeRanges); httpSink.prepare(messages); httpSink.execute(); - verify(instrumentation, times(1)).captureCount("firehose_sink_http_response_code_total", 1, "status_code=" + statusLine.getStatusCode()); + verify(firehoseInstrumentation, times(1)).captureCount("firehose_sink_http_response_code_total", 1L, "status_code=" + statusLine.getStatusCode()); } @Test @@ -426,13 +426,13 @@ public void shouldLogResponseBodyWhenDebugIsEnabledAndNonNullResponse() throws E when(response.getEntity()).thenReturn(httpEntity); when(httpEntity.getContent()).thenReturn(new StringInputStream("[{\"key\":\"value1\"},{\"key\":\"value2\"}]")); when(request.build(messages)).thenReturn(httpRequests); - when(instrumentation.isDebugEnabled()).thenReturn(true); + when(firehoseInstrumentation.isDebugEnabled()).thenReturn(true); - HttpSink httpSink = new HttpSink(instrumentation, request, httpClient, stencilClient, + HttpSink httpSink = new HttpSink(firehoseInstrumentation, request, httpClient, stencilClient, retryStatusCodeRange, requestLogStatusCodeRanges); httpSink.prepare(messages); httpSink.execute(); - verify(instrumentation, times(1)).logDebug("Response Body: [{\"key\":\"value1\"},{\"key\":\"value2\"}]"); + verify(firehoseInstrumentation, times(1)).logDebug("Response Body: [{\"key\":\"value1\"},{\"key\":\"value2\"}]"); } @Test @@ -447,12 +447,12 @@ public void shouldNotLogResponseBodyWhenDebugIsEnabledWithNullHttpResponseEntity when(httpClient.execute(httpPut)).thenReturn(response); when(response.getEntity()).thenReturn(null); when(request.build(messages)).thenReturn(httpRequests); - when(instrumentation.isDebugEnabled()).thenReturn(true); + when(firehoseInstrumentation.isDebugEnabled()).thenReturn(true); - HttpSink httpSink = new HttpSink(instrumentation, request, httpClient, stencilClient, + HttpSink httpSink = new HttpSink(firehoseInstrumentation, request, httpClient, stencilClient, retryStatusCodeRange, requestLogStatusCodeRanges); httpSink.prepare(messages); httpSink.execute(); - verify(instrumentation, times(0)).logDebug(any()); + verify(firehoseInstrumentation, times(0)).logDebug(any()); } } diff --git a/src/test/java/io/odpf/firehose/sink/http/auth/OAuth2CredentialTest.java b/src/test/java/io/odpf/firehose/sink/http/auth/OAuth2CredentialTest.java index 19c948282..28bc1ae77 100644 --- a/src/test/java/io/odpf/firehose/sink/http/auth/OAuth2CredentialTest.java +++ b/src/test/java/io/odpf/firehose/sink/http/auth/OAuth2CredentialTest.java @@ -1,7 +1,7 @@ package io.odpf.firehose.sink.http.auth; -import io.odpf.firehose.metrics.Instrumentation; -import io.odpf.firehose.metrics.StatsDReporter; +import io.odpf.depot.metrics.StatsDReporter; +import io.odpf.firehose.metrics.FirehoseInstrumentation; import com.google.gson.JsonSyntaxException; import okhttp3.OkHttpClient; import okhttp3.Request; @@ -49,7 +49,7 @@ public void setUp() { String clientSecret = "clientSecret"; String scope = "order:read"; String accessTokenEndpoint = "http://127.0.0.1:1080/oauth2/token"; - oAuth2Credential = new OAuth2Credential(new 
Instrumentation(statsDReporter, OAuth2Credential.class), clientId, clientSecret, scope, accessTokenEndpoint); + oAuth2Credential = new OAuth2Credential(new FirehoseInstrumentation(statsDReporter, OAuth2Credential.class), clientId, clientSecret, scope, accessTokenEndpoint); httpClient = oAuth2Credential.initialize(HttpClients.custom()).build(); okHttpClient = new OkHttpClient.Builder().addInterceptor(oAuth2Credential).build(); } diff --git a/src/test/java/io/odpf/firehose/sink/http/request/RequestFactoryTest.java b/src/test/java/io/odpf/firehose/sink/http/request/RequestFactoryTest.java index d77ba0781..a7f7cbedf 100644 --- a/src/test/java/io/odpf/firehose/sink/http/request/RequestFactoryTest.java +++ b/src/test/java/io/odpf/firehose/sink/http/request/RequestFactoryTest.java @@ -1,8 +1,8 @@ package io.odpf.firehose.sink.http.request; +import io.odpf.depot.metrics.StatsDReporter; import io.odpf.firehose.config.HttpSinkConfig; -import io.odpf.firehose.metrics.StatsDReporter; import io.odpf.firehose.sink.http.request.types.SimpleRequest; import io.odpf.firehose.sink.http.request.types.DynamicUrlRequest; import io.odpf.firehose.sink.http.request.types.ParameterizedHeaderRequest; diff --git a/src/test/java/io/odpf/firehose/sink/http/request/body/JsonBodyTest.java b/src/test/java/io/odpf/firehose/sink/http/request/body/JsonBodyTest.java index 8f8ddd424..2203cdc0a 100644 --- a/src/test/java/io/odpf/firehose/sink/http/request/body/JsonBodyTest.java +++ b/src/test/java/io/odpf/firehose/sink/http/request/body/JsonBodyTest.java @@ -14,7 +14,7 @@ import org.junit.Test; import org.junit.runner.RunWith; import org.mockito.Mock; -import org.mockito.runners.MockitoJUnitRunner; +import org.mockito.junit.MockitoJUnitRunner; @RunWith(MockitoJUnitRunner.class) public class JsonBodyTest { diff --git a/src/test/java/io/odpf/firehose/sink/http/request/create/BatchRequestCreatorTest.java b/src/test/java/io/odpf/firehose/sink/http/request/create/BatchRequestCreatorTest.java index 988cffa97..6d76bf055 100644 --- a/src/test/java/io/odpf/firehose/sink/http/request/create/BatchRequestCreatorTest.java +++ b/src/test/java/io/odpf/firehose/sink/http/request/create/BatchRequestCreatorTest.java @@ -3,7 +3,7 @@ import io.odpf.firehose.config.enums.HttpSinkRequestMethodType; import io.odpf.firehose.message.Message; import io.odpf.firehose.exception.DeserializerException; -import io.odpf.firehose.metrics.Instrumentation; +import io.odpf.firehose.metrics.FirehoseInstrumentation; import io.odpf.firehose.sink.http.request.body.JsonBody; import io.odpf.firehose.sink.http.request.entity.RequestEntityBuilder; import io.odpf.firehose.sink.http.request.header.HeaderBuilder; @@ -27,10 +27,7 @@ import java.util.List; import static org.junit.Assert.assertEquals; -import static org.mockito.ArgumentMatchers.any; -import static org.mockito.Mockito.times; -import static org.mockito.Mockito.verify; -import static org.mockito.Mockito.when; +import static org.mockito.Mockito.*; import static org.mockito.MockitoAnnotations.initMocks; public class BatchRequestCreatorTest { @@ -48,7 +45,7 @@ public class BatchRequestCreatorTest { private JsonBody jsonBody; @Mock - private Instrumentation instrumentation; + private FirehoseInstrumentation firehoseInstrumentation; private List messages; @@ -61,21 +58,21 @@ public void setup() { @Test public void shouldWrapMessageToASingleRequest() throws DeserializerException, URISyntaxException { - BatchRequestCreator batchRequestCreator = new BatchRequestCreator(instrumentation, uriBuilder, headerBuilder, 
HttpSinkRequestMethodType.PUT, jsonBody); + BatchRequestCreator batchRequestCreator = new BatchRequestCreator(firehoseInstrumentation, uriBuilder, headerBuilder, HttpSinkRequestMethodType.PUT, jsonBody); List requests = batchRequestCreator.create(messages, requestEntityBuilder); assertEquals(1, requests.size()); - verify(instrumentation, times(1)).logDebug("\nRequest URL: {}\nRequest headers: {}\nRequest content: {}\nRequest method: {}", + verify(firehoseInstrumentation, times(1)).logDebug("\nRequest URL: {}\nRequest headers: {}\nRequest content: {}\nRequest method: {}", uriBuilder.build(), headerBuilder.build(), jsonBody.serialize(messages), HttpSinkRequestMethodType.PUT); } @Test public void shouldWrapMessageToASingleRequestWhenPostRequest() throws DeserializerException, URISyntaxException { - BatchRequestCreator batchRequestCreator = new BatchRequestCreator(instrumentation, uriBuilder, headerBuilder, HttpSinkRequestMethodType.POST, jsonBody); + BatchRequestCreator batchRequestCreator = new BatchRequestCreator(firehoseInstrumentation, uriBuilder, headerBuilder, HttpSinkRequestMethodType.POST, jsonBody); List requests = batchRequestCreator.create(messages, requestEntityBuilder); assertEquals(1, requests.size()); - verify(instrumentation, times(1)).logDebug("\nRequest URL: {}\nRequest headers: {}\nRequest content: {}\nRequest method: {}", + verify(firehoseInstrumentation, times(1)).logDebug("\nRequest URL: {}\nRequest headers: {}\nRequest content: {}\nRequest method: {}", uriBuilder.build(), headerBuilder.build(), jsonBody.serialize(messages), HttpSinkRequestMethodType.POST); } @@ -87,11 +84,11 @@ public void shouldWrapMessagesToASingleRequest() throws DeserializerException, U messages.add(message1); messages.add(message2); - BatchRequestCreator batchRequestCreator = new BatchRequestCreator(instrumentation, uriBuilder, headerBuilder, HttpSinkRequestMethodType.PUT, jsonBody); + BatchRequestCreator batchRequestCreator = new BatchRequestCreator(firehoseInstrumentation, uriBuilder, headerBuilder, HttpSinkRequestMethodType.PUT, jsonBody); List requests = batchRequestCreator.create(messages, requestEntityBuilder); assertEquals(1, requests.size()); - verify(instrumentation, times(1)).logDebug("\nRequest URL: {}\nRequest headers: {}\nRequest content: {}\nRequest method: {}", + verify(firehoseInstrumentation, times(1)).logDebug("\nRequest URL: {}\nRequest headers: {}\nRequest content: {}\nRequest method: {}", uriBuilder.build(), headerBuilder.build(), jsonBody.serialize(messages), HttpSinkRequestMethodType.PUT); } @@ -103,13 +100,13 @@ public void shouldSetRequestPropertiesOnlyOnce() throws DeserializerException, U messages.add(message1); messages.add(message2); - BatchRequestCreator batchRequestCreator = new BatchRequestCreator(instrumentation, uriBuilder, headerBuilder, HttpSinkRequestMethodType.POST, jsonBody); + BatchRequestCreator batchRequestCreator = new BatchRequestCreator(firehoseInstrumentation, uriBuilder, headerBuilder, HttpSinkRequestMethodType.POST, jsonBody); batchRequestCreator.create(messages, requestEntityBuilder); verify(uriBuilder, times(1)).build(); verify(headerBuilder, times(1)).build(); verify(requestEntityBuilder, times(1)).buildHttpEntity(any(String.class)); - verify(instrumentation, times(1)).logDebug("\nRequest URL: {}\nRequest headers: {}\nRequest content: {}\nRequest method: {}", + verify(firehoseInstrumentation, times(1)).logDebug("\nRequest URL: {}\nRequest headers: {}\nRequest content: {}\nRequest method: {}", uriBuilder.build(), headerBuilder.build(), 
jsonBody.serialize(messages), HttpSinkRequestMethodType.POST); } @@ -134,7 +131,7 @@ public void shouldProperlyBuildRequests() throws DeserializerException, URISynta when(jsonBody.serialize(messages)).thenReturn(serializedMessages); when(requestEntityBuilder.buildHttpEntity(any())).thenReturn(new StringEntity("[\"dummyMessage1\", \"dummyMessage2\"]", ContentType.APPLICATION_JSON)); - BatchRequestCreator batchRequestCreator = new BatchRequestCreator(instrumentation, uriBuilder, headerBuilder, HttpSinkRequestMethodType.POST, jsonBody); + BatchRequestCreator batchRequestCreator = new BatchRequestCreator(firehoseInstrumentation, uriBuilder, headerBuilder, HttpSinkRequestMethodType.POST, jsonBody); List httpEntityEnclosingRequestBases = batchRequestCreator.create(messages, requestEntityBuilder); BasicHeader header1 = new BasicHeader("Authorization", "auth_token"); @@ -145,7 +142,7 @@ public void shouldProperlyBuildRequests() throws DeserializerException, URISynta assertEquals(new URI("dummyEndpoint"), httpEntityEnclosingRequestBases.get(0).getURI()); Assert.assertTrue(new ReflectionEquals(httpEntityEnclosingRequestBases.get(0).getAllHeaders()).matches(headers)); - verify(instrumentation, times(1)).logDebug("\nRequest URL: {}\nRequest headers: {}\nRequest content: {}\nRequest method: {}", + verify(firehoseInstrumentation, times(1)).logDebug("\nRequest URL: {}\nRequest headers: {}\nRequest content: {}\nRequest method: {}", uriBuilder.build(), headerBuilder.build(), jsonBody.serialize(messages), HttpSinkRequestMethodType.POST); } } diff --git a/src/test/java/io/odpf/firehose/sink/http/request/create/IndividualRequestCreatorTest.java b/src/test/java/io/odpf/firehose/sink/http/request/create/IndividualRequestCreatorTest.java index d76fe7f6f..efb30fe21 100644 --- a/src/test/java/io/odpf/firehose/sink/http/request/create/IndividualRequestCreatorTest.java +++ b/src/test/java/io/odpf/firehose/sink/http/request/create/IndividualRequestCreatorTest.java @@ -3,7 +3,7 @@ import io.odpf.firehose.config.enums.HttpSinkRequestMethodType; import io.odpf.firehose.message.Message; import io.odpf.firehose.exception.DeserializerException; -import io.odpf.firehose.metrics.Instrumentation; +import io.odpf.firehose.metrics.FirehoseInstrumentation; import io.odpf.firehose.sink.http.request.body.JsonBody; import io.odpf.firehose.sink.http.request.entity.RequestEntityBuilder; import io.odpf.firehose.sink.http.request.header.HeaderBuilder; @@ -21,10 +21,7 @@ import java.util.List; import static org.junit.Assert.assertEquals; -import static org.mockito.ArgumentMatchers.any; -import static org.mockito.Mockito.times; -import static org.mockito.Mockito.verify; -import static org.mockito.Mockito.when; +import static org.mockito.Mockito.*; import static org.mockito.MockitoAnnotations.initMocks; public class IndividualRequestCreatorTest { @@ -41,7 +38,7 @@ public class IndividualRequestCreatorTest { private JsonBody jsonBody; @Mock - private Instrumentation instrumentation; + private FirehoseInstrumentation firehoseInstrumentation; @Before public void setup() { @@ -62,13 +59,13 @@ public void shouldProduceIndividualRequests() throws DeserializerException, URIS serializedMessages.add("dummyMessage2"); when(jsonBody.serialize(messages)).thenReturn(serializedMessages); - IndividualRequestCreator individualRequestCreator = new IndividualRequestCreator(instrumentation, uriBuilder, headerBuilder, HttpSinkRequestMethodType.PUT, jsonBody); + IndividualRequestCreator individualRequestCreator = new 
IndividualRequestCreator(firehoseInstrumentation, uriBuilder, headerBuilder, HttpSinkRequestMethodType.PUT, jsonBody); List requests = individualRequestCreator.create(messages, requestEntityBuilder); assertEquals(2, requests.size()); - verify(instrumentation, times(1)).logDebug("\nRequest URL: {}\nRequest headers: {}\nRequest content: {}\nRequest method: {}", + verify(firehoseInstrumentation, times(1)).logDebug("\nRequest URL: {}\nRequest headers: {}\nRequest content: {}\nRequest method: {}", uriBuilder.build(), headerBuilder.build(), jsonBody.serialize(messages).get(0), HttpSinkRequestMethodType.PUT); - verify(instrumentation, times(1)).logDebug("\nRequest URL: {}\nRequest headers: {}\nRequest content: {}\nRequest method: {}", + verify(firehoseInstrumentation, times(1)).logDebug("\nRequest URL: {}\nRequest headers: {}\nRequest content: {}\nRequest method: {}", uriBuilder.build(), headerBuilder.build(), jsonBody.serialize(messages).get(1), HttpSinkRequestMethodType.PUT); } @@ -85,15 +82,15 @@ public void shouldSetRequestPropertiesMultipleTimes() throws DeserializerExcepti serializedMessages.add("dummyMessage2"); when(jsonBody.serialize(messages)).thenReturn(serializedMessages); - IndividualRequestCreator individualRequestCreator = new IndividualRequestCreator(instrumentation, uriBuilder, headerBuilder, HttpSinkRequestMethodType.PUT, jsonBody); + IndividualRequestCreator individualRequestCreator = new IndividualRequestCreator(firehoseInstrumentation, uriBuilder, headerBuilder, HttpSinkRequestMethodType.PUT, jsonBody); individualRequestCreator.create(messages, requestEntityBuilder); verify(uriBuilder, times(2)).build(any(Message.class)); verify(headerBuilder, times(2)).build(any(Message.class)); verify(requestEntityBuilder, times(2)).buildHttpEntity(any(String.class)); - verify(instrumentation, times(1)).logDebug("\nRequest URL: {}\nRequest headers: {}\nRequest content: {}\nRequest method: {}", + verify(firehoseInstrumentation, times(1)).logDebug("\nRequest URL: {}\nRequest headers: {}\nRequest content: {}\nRequest method: {}", uriBuilder.build(), headerBuilder.build(), jsonBody.serialize(messages).get(0), HttpSinkRequestMethodType.PUT); - verify(instrumentation, times(1)).logDebug("\nRequest URL: {}\nRequest headers: {}\nRequest content: {}\nRequest method: {}", + verify(firehoseInstrumentation, times(1)).logDebug("\nRequest URL: {}\nRequest headers: {}\nRequest content: {}\nRequest method: {}", uriBuilder.build(), headerBuilder.build(), jsonBody.serialize(messages).get(1), HttpSinkRequestMethodType.PUT); } @@ -110,13 +107,13 @@ public void shouldProduceIndividualRequestsWhenPUTRequest() throws DeserializerE serializedMessages.add("dummyMessage2"); when(jsonBody.serialize(messages)).thenReturn(serializedMessages); - IndividualRequestCreator individualRequestCreator = new IndividualRequestCreator(instrumentation, uriBuilder, headerBuilder, HttpSinkRequestMethodType.PUT, jsonBody); + IndividualRequestCreator individualRequestCreator = new IndividualRequestCreator(firehoseInstrumentation, uriBuilder, headerBuilder, HttpSinkRequestMethodType.PUT, jsonBody); List requests = individualRequestCreator.create(messages, requestEntityBuilder); assertEquals(2, requests.size()); - verify(instrumentation, times(1)).logDebug("\nRequest URL: {}\nRequest headers: {}\nRequest content: {}\nRequest method: {}", + verify(firehoseInstrumentation, times(1)).logDebug("\nRequest URL: {}\nRequest headers: {}\nRequest content: {}\nRequest method: {}", uriBuilder.build(), headerBuilder.build(), 
jsonBody.serialize(messages).get(0), HttpSinkRequestMethodType.PUT); - verify(instrumentation, times(1)).logDebug("\nRequest URL: {}\nRequest headers: {}\nRequest content: {}\nRequest method: {}", + verify(firehoseInstrumentation, times(1)).logDebug("\nRequest URL: {}\nRequest headers: {}\nRequest content: {}\nRequest method: {}", uriBuilder.build(), headerBuilder.build(), jsonBody.serialize(messages).get(1), HttpSinkRequestMethodType.PUT); } @@ -135,7 +132,7 @@ public void shouldWrapEntityToArrayIfSet() throws DeserializerException, URISynt requestEntityBuilder = new RequestEntityBuilder().setWrapping(true); - IndividualRequestCreator individualRequestCreator = new IndividualRequestCreator(instrumentation, uriBuilder, headerBuilder, HttpSinkRequestMethodType.PUT, jsonBody); + IndividualRequestCreator individualRequestCreator = new IndividualRequestCreator(firehoseInstrumentation, uriBuilder, headerBuilder, HttpSinkRequestMethodType.PUT, jsonBody); List requests = individualRequestCreator.create(messages, requestEntityBuilder); byte[] bytes1 = IOUtils.toByteArray(requests.get(0).getEntity().getContent()); @@ -143,9 +140,9 @@ public void shouldWrapEntityToArrayIfSet() throws DeserializerException, URISynt Assert.assertEquals("[dummyMessage1]", new String(bytes1)); Assert.assertEquals("[dummyMessage2]", new String(bytes2)); - verify(instrumentation, times(1)).logDebug("\nRequest URL: {}\nRequest headers: {}\nRequest content: {}\nRequest method: {}", + verify(firehoseInstrumentation, times(1)).logDebug("\nRequest URL: {}\nRequest headers: {}\nRequest content: {}\nRequest method: {}", uriBuilder.build(), headerBuilder.build(), jsonBody.serialize(messages).get(0), HttpSinkRequestMethodType.PUT); - verify(instrumentation, times(1)).logDebug("\nRequest URL: {}\nRequest headers: {}\nRequest content: {}\nRequest method: {}", + verify(firehoseInstrumentation, times(1)).logDebug("\nRequest URL: {}\nRequest headers: {}\nRequest content: {}\nRequest method: {}", uriBuilder.build(), headerBuilder.build(), jsonBody.serialize(messages).get(1), HttpSinkRequestMethodType.PUT); } @@ -164,7 +161,7 @@ public void shouldNotWrapEntityToArrayIfNot() throws DeserializerException, URIS requestEntityBuilder = new RequestEntityBuilder().setWrapping(false); - IndividualRequestCreator individualRequestCreator = new IndividualRequestCreator(instrumentation, uriBuilder, headerBuilder, HttpSinkRequestMethodType.PUT, jsonBody); + IndividualRequestCreator individualRequestCreator = new IndividualRequestCreator(firehoseInstrumentation, uriBuilder, headerBuilder, HttpSinkRequestMethodType.PUT, jsonBody); List requests = individualRequestCreator.create(messages, requestEntityBuilder); byte[] bytes1 = IOUtils.toByteArray(requests.get(0).getEntity().getContent()); @@ -172,9 +169,9 @@ public void shouldNotWrapEntityToArrayIfNot() throws DeserializerException, URIS Assert.assertEquals("dummyMessage1", new String(bytes1)); Assert.assertEquals("dummyMessage2", new String(bytes2)); - verify(instrumentation, times(1)).logDebug("\nRequest URL: {}\nRequest headers: {}\nRequest content: {}\nRequest method: {}", + verify(firehoseInstrumentation, times(1)).logDebug("\nRequest URL: {}\nRequest headers: {}\nRequest content: {}\nRequest method: {}", uriBuilder.build(), headerBuilder.build(), jsonBody.serialize(messages).get(0), HttpSinkRequestMethodType.PUT); - verify(instrumentation, times(1)).logDebug("\nRequest URL: {}\nRequest headers: {}\nRequest content: {}\nRequest method: {}", + verify(firehoseInstrumentation, 
times(1)).logDebug("\nRequest URL: {}\nRequest headers: {}\nRequest content: {}\nRequest method: {}", uriBuilder.build(), headerBuilder.build(), jsonBody.serialize(messages).get(1), HttpSinkRequestMethodType.PUT); } } diff --git a/src/test/java/io/odpf/firehose/sink/http/request/types/DynamicUrlRequestTest.java b/src/test/java/io/odpf/firehose/sink/http/request/types/DynamicUrlRequestTest.java index a35f1daf5..4d8f039c1 100644 --- a/src/test/java/io/odpf/firehose/sink/http/request/types/DynamicUrlRequestTest.java +++ b/src/test/java/io/odpf/firehose/sink/http/request/types/DynamicUrlRequestTest.java @@ -1,11 +1,11 @@ package io.odpf.firehose.sink.http.request.types; +import io.odpf.depot.metrics.StatsDReporter; import io.odpf.firehose.config.HttpSinkConfig; import io.odpf.firehose.config.enums.HttpSinkRequestMethodType; import io.odpf.firehose.config.enums.HttpSinkDataFormatType; import io.odpf.firehose.config.enums.HttpSinkParameterSourceType; import io.odpf.firehose.message.Message; -import io.odpf.firehose.metrics.StatsDReporter; import io.odpf.firehose.sink.http.request.body.JsonBody; import io.odpf.firehose.sink.http.request.entity.RequestEntityBuilder; import io.odpf.firehose.sink.http.request.header.HeaderBuilder; @@ -20,10 +20,7 @@ import java.util.List; import static org.gradle.internal.impldep.org.junit.Assert.assertFalse; -import static org.mockito.ArgumentMatchers.any; -import static org.mockito.Mockito.times; -import static org.mockito.Mockito.verify; -import static org.mockito.Mockito.when; +import static org.mockito.Mockito.*; import static org.mockito.MockitoAnnotations.initMocks; public class DynamicUrlRequestTest { diff --git a/src/test/java/io/odpf/firehose/sink/http/request/types/ParameterizedHeaderRequestTest.java b/src/test/java/io/odpf/firehose/sink/http/request/types/ParameterizedHeaderRequestTest.java index 59994b5d9..01ca02c59 100644 --- a/src/test/java/io/odpf/firehose/sink/http/request/types/ParameterizedHeaderRequestTest.java +++ b/src/test/java/io/odpf/firehose/sink/http/request/types/ParameterizedHeaderRequestTest.java @@ -1,12 +1,12 @@ package io.odpf.firehose.sink.http.request.types; +import io.odpf.depot.metrics.StatsDReporter; import io.odpf.firehose.config.HttpSinkConfig; import io.odpf.firehose.config.enums.HttpSinkRequestMethodType; import io.odpf.firehose.config.enums.HttpSinkDataFormatType; import io.odpf.firehose.config.enums.HttpSinkParameterPlacementType; import io.odpf.firehose.config.enums.HttpSinkParameterSourceType; import io.odpf.firehose.message.Message; -import io.odpf.firehose.metrics.StatsDReporter; import io.odpf.firehose.proto.ProtoToFieldMapper; import io.odpf.firehose.sink.http.request.body.JsonBody; import io.odpf.firehose.sink.http.request.entity.RequestEntityBuilder; @@ -23,10 +23,7 @@ import static org.gradle.internal.impldep.org.junit.Assert.assertFalse; import static org.gradle.internal.impldep.org.junit.Assert.assertTrue; -import static org.mockito.ArgumentMatchers.any; -import static org.mockito.Mockito.times; -import static org.mockito.Mockito.verify; -import static org.mockito.Mockito.when; +import static org.mockito.Mockito.*; import static org.mockito.MockitoAnnotations.initMocks; public class ParameterizedHeaderRequestTest { diff --git a/src/test/java/io/odpf/firehose/sink/http/request/types/ParameterizedUriRequestTest.java b/src/test/java/io/odpf/firehose/sink/http/request/types/ParameterizedUriRequestTest.java index 1f00953ab..ec64c77e0 100644 --- 
a/src/test/java/io/odpf/firehose/sink/http/request/types/ParameterizedUriRequestTest.java +++ b/src/test/java/io/odpf/firehose/sink/http/request/types/ParameterizedUriRequestTest.java @@ -1,12 +1,12 @@ package io.odpf.firehose.sink.http.request.types; +import io.odpf.depot.metrics.StatsDReporter; import io.odpf.firehose.config.HttpSinkConfig; import io.odpf.firehose.config.enums.HttpSinkRequestMethodType; import io.odpf.firehose.config.enums.HttpSinkDataFormatType; import io.odpf.firehose.config.enums.HttpSinkParameterPlacementType; import io.odpf.firehose.config.enums.HttpSinkParameterSourceType; import io.odpf.firehose.message.Message; -import io.odpf.firehose.metrics.StatsDReporter; import io.odpf.firehose.proto.ProtoToFieldMapper; import io.odpf.firehose.sink.http.request.body.JsonBody; import io.odpf.firehose.sink.http.request.entity.RequestEntityBuilder; @@ -23,10 +23,7 @@ import static org.gradle.internal.impldep.org.junit.Assert.assertFalse; import static org.gradle.internal.impldep.org.junit.Assert.assertTrue; -import static org.mockito.ArgumentMatchers.any; -import static org.mockito.Mockito.times; -import static org.mockito.Mockito.verify; -import static org.mockito.Mockito.when; +import static org.mockito.Mockito.*; import static org.mockito.MockitoAnnotations.initMocks; public class ParameterizedUriRequestTest { diff --git a/src/test/java/io/odpf/firehose/sink/http/request/types/SimpleRequestTest.java b/src/test/java/io/odpf/firehose/sink/http/request/types/SimpleRequestTest.java index b659c8dd4..423a5679a 100644 --- a/src/test/java/io/odpf/firehose/sink/http/request/types/SimpleRequestTest.java +++ b/src/test/java/io/odpf/firehose/sink/http/request/types/SimpleRequestTest.java @@ -1,11 +1,11 @@ package io.odpf.firehose.sink.http.request.types; +import io.odpf.depot.metrics.StatsDReporter; import io.odpf.firehose.config.HttpSinkConfig; import io.odpf.firehose.config.enums.HttpSinkRequestMethodType; import io.odpf.firehose.config.enums.HttpSinkDataFormatType; import io.odpf.firehose.config.enums.HttpSinkParameterSourceType; import io.odpf.firehose.message.Message; -import io.odpf.firehose.metrics.StatsDReporter; import io.odpf.firehose.sink.http.request.body.JsonBody; import io.odpf.firehose.sink.http.request.entity.RequestEntityBuilder; import io.odpf.firehose.sink.http.request.header.HeaderBuilder; @@ -20,10 +20,7 @@ import static org.gradle.internal.impldep.org.junit.Assert.assertFalse; import static org.gradle.internal.impldep.org.junit.Assert.assertTrue; -import static org.mockito.ArgumentMatchers.any; -import static org.mockito.Mockito.times; -import static org.mockito.Mockito.verify; -import static org.mockito.Mockito.when; +import static org.mockito.Mockito.*; import static org.mockito.MockitoAnnotations.initMocks; public class SimpleRequestTest { diff --git a/src/test/java/io/odpf/firehose/sink/http/request/uri/UriParserTest.java b/src/test/java/io/odpf/firehose/sink/http/request/uri/UriParserTest.java index d98bb2980..fa0969200 100644 --- a/src/test/java/io/odpf/firehose/sink/http/request/uri/UriParserTest.java +++ b/src/test/java/io/odpf/firehose/sink/http/request/uri/UriParserTest.java @@ -17,8 +17,7 @@ import org.mockito.Mock; import static org.junit.Assert.assertEquals; -import static org.mockito.ArgumentMatchers.any; -import static org.mockito.Mockito.when; +import static org.mockito.Mockito.*; import static org.mockito.MockitoAnnotations.initMocks; public class UriParserTest { diff --git a/src/test/java/io/odpf/firehose/sink/influxdb/InfluxSinkTest.java 
b/src/test/java/io/odpf/firehose/sink/influxdb/InfluxSinkTest.java index a6884d8a0..bcdcb8454 100644 --- a/src/test/java/io/odpf/firehose/sink/influxdb/InfluxSinkTest.java +++ b/src/test/java/io/odpf/firehose/sink/influxdb/InfluxSinkTest.java @@ -9,7 +9,7 @@ import io.odpf.firehose.consumer.TestFeedbackLogKey; import io.odpf.firehose.consumer.TestFeedbackLogMessage; import io.odpf.firehose.exception.DeserializerException; -import io.odpf.firehose.metrics.Instrumentation; +import io.odpf.firehose.metrics.FirehoseInstrumentation; import io.odpf.firehose.sink.Sink; import com.google.protobuf.DynamicMessage; import com.google.protobuf.Timestamp; @@ -25,7 +25,7 @@ import org.junit.runner.RunWith; import org.mockito.ArgumentCaptor; import org.mockito.Mock; -import org.mockito.runners.MockitoJUnitRunner; +import org.mockito.junit.MockitoJUnitRunner; import java.io.IOException; import java.util.Collections; @@ -67,7 +67,7 @@ public class InfluxSinkTest { private Parser protoParser; @Mock - private Instrumentation instrumentation; + private FirehoseInstrumentation firehoseInstrumentation; @Mock private StencilClient mockStencilClient; @@ -101,7 +101,7 @@ public void shouldPrepareBatchPoints() throws IOException, DeserializerException DynamicMessage dynamicMessage = DynamicMessage.newBuilder(TestBookingLogMessage.getDescriptor()).build(); when(protoParser.parse(any())).thenReturn(dynamicMessage); - InfluxSinkStub influx = new InfluxSinkStub(instrumentation, "influx", config, protoParser, client, stencilClient); + InfluxSinkStub influx = new InfluxSinkStub(firehoseInstrumentation, "influx", config, protoParser, client, stencilClient); influx.prepare(messages); verify(protoParser, times(1)).parse(message.getLogMessage()); @@ -114,7 +114,7 @@ public void shouldPushTagsAsStringValues() throws DeserializerException, IOExcep setupTagNameIndexMappingProperties(); config = ConfigFactory.create(InfluxSinkConfig.class, props); - sink = new InfluxSink(instrumentation, "influx", config, stencilClient.getParser(config.getInputSchemaProtoClass()), client, stencilClient); + sink = new InfluxSink(firehoseInstrumentation, "influx", config, stencilClient.getParser(config.getInputSchemaProtoClass()), client, stencilClient); ArgumentCaptor batchPointsArgumentCaptor = ArgumentCaptor.forClass(BatchPoints.class); @@ -130,7 +130,7 @@ public void shouldThrowExceptionOnEmptyFieldNameIndexMapping() throws IOExceptio props.setProperty("SINK_INFLUX_FIELD_NAME_PROTO_INDEX_MAPPING", emptyFieldNameIndex); props.setProperty("SINK_INFLUX_TAG_NAME_PROTO_INDEX_MAPPING", emptyTagNameIndexMapping); config = ConfigFactory.create(InfluxSinkConfig.class, props); - sink = new InfluxSink(instrumentation, "influx", config, stencilClient.getParser(config.getInputSchemaProtoClass()), client, stencilClient); + sink = new InfluxSink(firehoseInstrumentation, "influx", config, stencilClient.getParser(config.getInputSchemaProtoClass()), client, stencilClient); try { sink.pushMessage(messages); @@ -145,13 +145,13 @@ public void shouldPushMessagesWithType() throws DeserializerException, IOExcepti setupFieldNameIndexMappingProperties(); props.setProperty("SINK_INFLUX_TAG_NAME_PROTO_INDEX_MAPPING", emptyTagNameIndexMapping); config = ConfigFactory.create(InfluxSinkConfig.class, props); - sink = new InfluxSink(instrumentation, "influx", config, stencilClient.getParser(config.getInputSchemaProtoClass()), client, stencilClient); + sink = new InfluxSink(firehoseInstrumentation, "influx", config, stencilClient.getParser(config.getInputSchemaProtoClass()), 
client, stencilClient); ArgumentCaptor batchPointsArgumentCaptor = ArgumentCaptor.forClass(BatchPoints.class); sink.pushMessage(messages); - verify(instrumentation, times(1)).capturePreExecutionLatencies(messages); - verify(instrumentation, times(1)).startExecution(); - verify(instrumentation, times(1)).logInfo("Preparing {} messages", messages.size()); + verify(firehoseInstrumentation, times(1)).capturePreExecutionLatencies(messages); + verify(firehoseInstrumentation, times(1)).startExecution(); + verify(firehoseInstrumentation, times(1)).logInfo("Preparing {} messages", messages.size()); verify(client, times(1)).write(batchPointsArgumentCaptor.capture()); List batchPointsList = batchPointsArgumentCaptor.getAllValues(); @@ -162,7 +162,7 @@ public void shouldPushMessagesWithType() throws DeserializerException, IOExcepti public void shouldCloseStencilClient() throws IOException { config = ConfigFactory.create(InfluxSinkConfig.class, props); - sink = new InfluxSink(instrumentation, "influx", config, mockStencilClient.getParser(config.getInputSchemaProtoClass()), client, mockStencilClient); + sink = new InfluxSink(firehoseInstrumentation, "influx", config, mockStencilClient.getParser(config.getInputSchemaProtoClass()), client, mockStencilClient); sink.close(); verify(mockStencilClient, times(1)).close(); @@ -172,10 +172,10 @@ public void shouldCloseStencilClient() throws IOException { public void shouldLogWhenClosingConnection() throws IOException { config = ConfigFactory.create(InfluxSinkConfig.class, props); - sink = new InfluxSink(instrumentation, "influx", config, mockStencilClient.getParser(config.getInputSchemaProtoClass()), client, mockStencilClient); + sink = new InfluxSink(firehoseInstrumentation, "influx", config, mockStencilClient.getParser(config.getInputSchemaProtoClass()), client, mockStencilClient); sink.close(); - verify(instrumentation, times(1)).logInfo("InfluxDB connection closing"); + verify(firehoseInstrumentation, times(1)).logInfo("InfluxDB connection closing"); } @Test @@ -184,16 +184,16 @@ public void shouldLogDataPointAndBatchPoints() throws IOException, DeserializerE setupTagNameIndexMappingProperties(); config = ConfigFactory.create(InfluxSinkConfig.class, props); - sink = new InfluxSink(instrumentation, "influx", config, stencilClient.getParser(config.getInputSchemaProtoClass()), client, stencilClient); + sink = new InfluxSink(firehoseInstrumentation, "influx", config, stencilClient.getParser(config.getInputSchemaProtoClass()), client, stencilClient); ArgumentCaptor batchPointsArgumentCaptor = ArgumentCaptor.forClass(BatchPoints.class); sink.pushMessage(messages); verify(client, times(1)).write(batchPointsArgumentCaptor.capture()); List batchPointsList = batchPointsArgumentCaptor.getAllValues(); - verify(instrumentation, times(1)).logInfo("Preparing {} messages", messages.size()); - verify(instrumentation, times(1)).logDebug("Data point: {}", batchPointsList.get(0).getPoints().get(0).toString()); - verify(instrumentation, times(1)).logDebug("Batch points: {}", batchPointsList.get(0).toString()); + verify(firehoseInstrumentation, times(1)).logInfo("Preparing {} messages", messages.size()); + verify(firehoseInstrumentation, times(1)).logDebug("Data point: {}", batchPointsList.get(0).getPoints().get(0).toString()); + verify(firehoseInstrumentation, times(1)).logDebug("Batch points: {}", batchPointsList.get(0).toString()); } private void setupFieldNameIndexMappingProperties() { @@ -205,8 +205,8 @@ private void setupTagNameIndexMappingProperties() { } public class 
InfluxSinkStub extends InfluxSink { - public InfluxSinkStub(Instrumentation instrumentation, String sinkType, InfluxSinkConfig config, Parser protoParser, InfluxDB client, StencilClient stencilClient) { - super(instrumentation, sinkType, config, protoParser, client, stencilClient); + public InfluxSinkStub(FirehoseInstrumentation firehoseInstrumentation, String sinkType, InfluxSinkConfig config, Parser protoParser, InfluxDB client, StencilClient stencilClient) { + super(firehoseInstrumentation, sinkType, config, protoParser, client, stencilClient); } public void prepare(List messageList) throws IOException { diff --git a/src/test/java/io/odpf/firehose/sink/jdbc/HikariJdbcConnectionPoolTest.java b/src/test/java/io/odpf/firehose/sink/jdbc/HikariJdbcConnectionPoolTest.java index f876d7536..f2859b182 100644 --- a/src/test/java/io/odpf/firehose/sink/jdbc/HikariJdbcConnectionPoolTest.java +++ b/src/test/java/io/odpf/firehose/sink/jdbc/HikariJdbcConnectionPoolTest.java @@ -6,7 +6,7 @@ import org.junit.runner.RunWith; import org.mockito.Mock; import org.mockito.Mockito; -import org.mockito.runners.MockitoJUnitRunner; +import org.mockito.junit.MockitoJUnitRunner; import java.sql.Connection; import java.sql.SQLException; diff --git a/src/test/java/io/odpf/firehose/sink/jdbc/JdbcSinkTest.java b/src/test/java/io/odpf/firehose/sink/jdbc/JdbcSinkTest.java index 7a3a1e861..80031cb0b 100644 --- a/src/test/java/io/odpf/firehose/sink/jdbc/JdbcSinkTest.java +++ b/src/test/java/io/odpf/firehose/sink/jdbc/JdbcSinkTest.java @@ -3,14 +3,14 @@ import io.odpf.firehose.message.Message; import io.odpf.firehose.exception.DeserializerException; -import io.odpf.firehose.metrics.Instrumentation; +import io.odpf.firehose.metrics.FirehoseInstrumentation; import io.odpf.stencil.client.StencilClient; import org.junit.Before; import org.junit.Test; import org.junit.runner.RunWith; import org.mockito.InOrder; import org.mockito.Mock; -import org.mockito.runners.MockitoJUnitRunner; +import org.mockito.junit.MockitoJUnitRunner; import java.io.IOException; import java.sql.Connection; @@ -35,7 +35,7 @@ public class JdbcSinkTest { private StencilClient stencilClient; @Mock - private Instrumentation instrumentation; + private FirehoseInstrumentation firehoseInstrumentation; @Mock private JdbcConnectionPool jdbcConnectionPool; @@ -50,8 +50,8 @@ public class JdbcSinkTest { public void setUp() throws SQLException { when(jdbcConnectionPool.getConnection()).thenReturn(connection); when(connection.createStatement()).thenReturn(statement); - when(instrumentation.startExecution()).thenReturn(Instant.now()); - jdbcSink = new JdbcSink(instrumentation, "db", jdbcConnectionPool, queryTemplate, stencilClient); + when(firehoseInstrumentation.startExecution()).thenReturn(Instant.now()); + jdbcSink = new JdbcSink(firehoseInstrumentation, "db", jdbcConnectionPool, queryTemplate, stencilClient); } @Test @@ -68,8 +68,8 @@ public void shouldUseBatchForPushMessage() throws SQLException, IOException, Des new Message(new byte[0], new byte[0], "topic", 0, 100)); jdbcSink.pushMessage(messages); - verify(instrumentation, times(1)).startExecution(); - verify(instrumentation, times(1)).captureSinkExecutionTelemetry("db", messages.size()); + verify(firehoseInstrumentation, times(1)).startExecution(); + verify(firehoseInstrumentation, times(1)).captureSinkExecutionTelemetry("db", messages.size()); } @Test @@ -78,11 +78,11 @@ public void shouldCallStartExecutionBeforeCaptureSuccessAttempt() throws IOExcep new Message(new byte[0], new byte[0], "topic", 0, 100)); 
jdbcSink.pushMessage(messages); - verify(instrumentation, times(1)).startExecution(); - verify(instrumentation, times(1)).captureSinkExecutionTelemetry("db", messages.size()); - InOrder inOrder = inOrder(instrumentation); - inOrder.verify(instrumentation).startExecution(); - inOrder.verify(instrumentation).captureSinkExecutionTelemetry("db", messages.size()); + verify(firehoseInstrumentation, times(1)).startExecution(); + verify(firehoseInstrumentation, times(1)).captureSinkExecutionTelemetry("db", messages.size()); + InOrder inOrder = inOrder(firehoseInstrumentation); + inOrder.verify(firehoseInstrumentation).startExecution(); + inOrder.verify(firehoseInstrumentation).captureSinkExecutionTelemetry("db", messages.size()); } @Test @@ -91,7 +91,7 @@ public void shouldReturnEmptyListWhenNoException() throws IOException, Deseriali new Message(new byte[0], new byte[0], "topic", 0, 100)); assertEquals(jdbcSink.pushMessage(messages).size(), 0); - verify(instrumentation, times(1)).captureSinkExecutionTelemetry("db", messages.size()); + verify(firehoseInstrumentation, times(1)).captureSinkExecutionTelemetry("db", messages.size()); } @Test @@ -100,7 +100,7 @@ public void shouldPrepareBatchForQueries() throws SQLException { List messages = Arrays.asList(new Message(new byte[0], new byte[0], "topic", 0, 100), new Message(new byte[0], new byte[0], "topic", 0, 100)); - JdbcSinkStub dbSinkStub = new JdbcSinkStub(instrumentation, "db", jdbcConnectionPool, queryTemplate, stencilClient, queries); + JdbcSinkStub dbSinkStub = new JdbcSinkStub(firehoseInstrumentation, "db", jdbcConnectionPool, queryTemplate, stencilClient, queries); dbSinkStub.prepare(messages); verify(statement, times(queries.size())).addBatch(anyString()); @@ -112,7 +112,7 @@ public void shouldReleaseConnectionAfterSuccessfulQuery() throws Exception { String sql = "select * from table"; List messages = Arrays.asList(new Message(new byte[0], new byte[0], "topic", 0, 100), new Message(new byte[0], new byte[0], "topic", 0, 100)); - JdbcSinkStub dbSinkStub = new JdbcSinkStub(instrumentation, "db", jdbcConnectionPool, queryTemplate, stencilClient, Arrays.asList(sql)); + JdbcSinkStub dbSinkStub = new JdbcSinkStub(firehoseInstrumentation, "db", jdbcConnectionPool, queryTemplate, stencilClient, Arrays.asList(sql)); dbSinkStub.pushMessage(messages); @@ -132,7 +132,7 @@ public void shouldReleaseConnectionOnException() throws Exception { @Test public void shouldNotReleaseConnectionWhenNull() throws Exception { String sql = "select * from table"; - JdbcSink sink = new JdbcSink(instrumentation, "db", jdbcConnectionPool, queryTemplate, stencilClient, statement, null); + JdbcSink sink = new JdbcSink(firehoseInstrumentation, "db", jdbcConnectionPool, queryTemplate, stencilClient, statement, null); sink.execute(); @@ -143,7 +143,7 @@ public void shouldNotReleaseConnectionWhenNull() throws Exception { @Test public void shouldCloseConnectionPool() throws IOException, InterruptedException { String sql = "select * from table"; - JdbcSinkStub dbSinkStub = new JdbcSinkStub(instrumentation, "db", jdbcConnectionPool, queryTemplate, stencilClient, Arrays.asList(sql)); + JdbcSinkStub dbSinkStub = new JdbcSinkStub(firehoseInstrumentation, "db", jdbcConnectionPool, queryTemplate, stencilClient, Arrays.asList(sql)); dbSinkStub.close(); verify(jdbcConnectionPool, times(1)).shutdown(); @@ -152,7 +152,7 @@ public void shouldCloseConnectionPool() throws IOException, InterruptedException @Test public void shouldCloseStencilClient() throws IOException { String sql = "select * 
from table"; - JdbcSinkStub dbSinkStub = new JdbcSinkStub(instrumentation, "db", jdbcConnectionPool, queryTemplate, stencilClient, Arrays.asList(sql)); + JdbcSinkStub dbSinkStub = new JdbcSinkStub(firehoseInstrumentation, "db", jdbcConnectionPool, queryTemplate, stencilClient, Arrays.asList(sql)); dbSinkStub.close(); verify(stencilClient, times(1)).close(); @@ -161,10 +161,10 @@ public void shouldCloseStencilClient() throws IOException { @Test public void shouldLogWhenClosingConnection() throws IOException { String sql = "select * from table"; - JdbcSinkStub dbSinkStub = new JdbcSinkStub(instrumentation, "db", jdbcConnectionPool, queryTemplate, stencilClient, Arrays.asList(sql)); + JdbcSinkStub dbSinkStub = new JdbcSinkStub(firehoseInstrumentation, "db", jdbcConnectionPool, queryTemplate, stencilClient, Arrays.asList(sql)); dbSinkStub.close(); - verify(instrumentation, times(1)).logInfo("Database connection closing"); + verify(firehoseInstrumentation, times(1)).logInfo("Database connection closing"); } @Test(expected = IOException.class) @@ -172,7 +172,7 @@ public void shouldThrowIOExceptionWhenFailToClose() throws InterruptedException, doThrow(InterruptedException.class).when(jdbcConnectionPool).shutdown(); List queriesList = Arrays.asList("select * from table", "select * from table2"); - JdbcSinkStub dbSinkStub = new JdbcSinkStub(instrumentation, "db", jdbcConnectionPool, queryTemplate, stencilClient, queriesList); + JdbcSinkStub dbSinkStub = new JdbcSinkStub(firehoseInstrumentation, "db", jdbcConnectionPool, queryTemplate, stencilClient, queriesList); dbSinkStub.close(); } @@ -182,7 +182,7 @@ public void shouldLogQueryString() { Message message = new Message("key".getBytes(), "msg".getBytes(), "topic1", 0, 100); jdbcSink.createQueries(Arrays.asList(message)); - verify(instrumentation, times(1)).logDebug(queryTemplate.toQueryString(message)); + verify(firehoseInstrumentation, times(1)).logDebug(queryTemplate.toQueryString(message)); } @Test @@ -193,21 +193,21 @@ public void shouldLogDbResponse() throws Exception { List messages = Arrays.asList(message); when(statement.executeBatch()).thenReturn(updateCounts); - JdbcSinkStub dbSinkStub = new JdbcSinkStub(instrumentation, "db", jdbcConnectionPool, queryTemplate, stencilClient, queries); + JdbcSinkStub dbSinkStub = new JdbcSinkStub(firehoseInstrumentation, "db", jdbcConnectionPool, queryTemplate, stencilClient, queries); dbSinkStub.pushMessage(messages); verify(statement, times(1)).addBatch("select * from table"); - verify(instrumentation, times(1)).logInfo("Preparing {} messages", 1); - verify(instrumentation, times(1)).logDebug("DB response: {}", Arrays.toString(updateCounts)); + verify(firehoseInstrumentation, times(1)).logInfo("Preparing {} messages", 1); + verify(firehoseInstrumentation, times(1)).logDebug("DB response: {}", Arrays.toString(updateCounts)); } public class JdbcSinkStub extends JdbcSink { private List queries; - public JdbcSinkStub(Instrumentation instrumentation, String sinkType, JdbcConnectionPool pool, QueryTemplate queryTemplate, StencilClient stencilClient, List queries) { - super(instrumentation, sinkType, pool, queryTemplate, stencilClient); + public JdbcSinkStub(FirehoseInstrumentation firehoseInstrumentation, String sinkType, JdbcConnectionPool pool, QueryTemplate queryTemplate, StencilClient stencilClient, List queries) { + super(firehoseInstrumentation, sinkType, pool, queryTemplate, stencilClient); this.queries = queries; } diff --git a/src/test/java/io/odpf/firehose/sink/jdbc/QueryTemplateTest.java 
b/src/test/java/io/odpf/firehose/sink/jdbc/QueryTemplateTest.java index f07da7168..feaa2f5f5 100644 --- a/src/test/java/io/odpf/firehose/sink/jdbc/QueryTemplateTest.java +++ b/src/test/java/io/odpf/firehose/sink/jdbc/QueryTemplateTest.java @@ -8,7 +8,7 @@ import org.junit.Test; import org.junit.runner.RunWith; import org.mockito.Mock; -import org.mockito.runners.MockitoJUnitRunner; +import org.mockito.junit.MockitoJUnitRunner; import java.io.IOException; import java.sql.SQLException; diff --git a/src/test/java/io/odpf/firehose/sink/log/LogSinkFactoryTest.java b/src/test/java/io/odpf/firehose/sink/log/LogSinkFactoryTest.java deleted file mode 100644 index 4c4906dd4..000000000 --- a/src/test/java/io/odpf/firehose/sink/log/LogSinkFactoryTest.java +++ /dev/null @@ -1,38 +0,0 @@ -package io.odpf.firehose.sink.log; - - -import io.odpf.firehose.metrics.StatsDReporter; -import io.odpf.firehose.sink.Sink; -import io.odpf.stencil.client.StencilClient; -import org.junit.Before; -import org.junit.Test; -import org.mockito.Mock; -import org.mockito.MockitoAnnotations; - -import java.util.HashMap; -import java.util.Map; - -import static org.junit.Assert.assertEquals; - -public class LogSinkFactoryTest { - - private Map configuration; - - @Mock - private StatsDReporter statsDReporter; - - @Mock - private StencilClient stencilClient; - - @Before - public void setUp() { - configuration = new HashMap<>(); - MockitoAnnotations.initMocks(this); - } - - @Test - public void shouldCreateLogSink() { - Sink sink = LogSinkFactory.create(configuration, statsDReporter, stencilClient); - assertEquals(LogSink.class, sink.getClass()); - } -} diff --git a/src/test/java/io/odpf/firehose/sink/log/LogSinkTest.java b/src/test/java/io/odpf/firehose/sink/log/LogSinkTest.java deleted file mode 100644 index 886618ffa..000000000 --- a/src/test/java/io/odpf/firehose/sink/log/LogSinkTest.java +++ /dev/null @@ -1,86 +0,0 @@ -package io.odpf.firehose.sink.log; - -import io.odpf.firehose.message.Message; -import io.odpf.firehose.consumer.TestKey; -import io.odpf.firehose.consumer.TestMessage; -import io.odpf.firehose.metrics.Instrumentation; -import io.odpf.firehose.sink.Sink; -import com.google.protobuf.DynamicMessage; -import org.junit.Before; -import org.junit.Test; -import org.junit.runner.RunWith; -import org.mockito.Mock; -import org.mockito.Mockito; -import org.mockito.runners.MockitoJUnitRunner; - -import java.io.IOException; -import java.util.Arrays; -import java.util.Collections; -import java.util.List; - -@RunWith(MockitoJUnitRunner.class) -public class LogSinkTest { - - @Mock - private Instrumentation instrumentation; - - @Mock - private KeyOrMessageParser parser; - - private final DynamicMessage dynamicMessage = DynamicMessage.newBuilder(TestMessage.getDescriptor()).build(); - - private Sink sink; - - @Before - public void setup() throws IOException { - Mockito.when(parser.parse(Mockito.any(Message.class))).thenReturn(dynamicMessage); - - sink = new LogSink(parser, instrumentation); - } - - @Test - public void shouldPrintProto() throws IOException { - List messages = Collections.singletonList(new Message(new byte[0], new byte[0], "topic", 0, 100)); - - sink.pushMessage(messages); - - Mockito.verify(instrumentation, Mockito.times(1)).logInfo( - Mockito.eq("\n================= DATA =======================\n{}"), - Mockito.any(DynamicMessage.class)); - } - - @Test - public void shouldParseProto() throws IOException { - List messages = Arrays.asList(new Message(new byte[0], new byte[0], "topic", 0, 100), - new Message(new 
byte[0], new byte[0], "topic-2", 0, 100)); - - sink.pushMessage(messages); - - Mockito.verify(parser, Mockito.times(2)).parse(Mockito.any(Message.class)); - } - - @Test - public void shouldPrintTestProto() throws IOException { - TestKey testKey = TestKey.getDefaultInstance(); - TestMessage testMessage = TestMessage.getDefaultInstance(); - List messages = Collections.singletonList(new Message(testKey.toByteArray(), testMessage.toByteArray(), "topic", 0, 100)); - - sink.pushMessage(messages); - - Mockito.verify(instrumentation, Mockito.times(1)).logInfo( - Mockito.eq("\n================= DATA =======================\n{}"), - Mockito.any(DynamicMessage.class)); - } - - @Test - public void shouldSkipParsingAndNotFailIfKeyIsNull() throws IOException { - byte[] testMessage = TestMessage.getDefaultInstance().toByteArray(); - - sink.pushMessage(Collections.singletonList(new Message(null, testMessage, "topic", 0, 100))); - - Mockito.verify(instrumentation, Mockito.times(1)).logInfo( - Mockito.eq("\n================= DATA =======================\n{}"), - Mockito.any(DynamicMessage.class)); - Mockito.verify(parser, Mockito.never()).parse(null); - } -} diff --git a/src/test/java/io/odpf/firehose/sink/mongodb/MongoSinkFactoryTest.java b/src/test/java/io/odpf/firehose/sink/mongodb/MongoSinkFactoryTest.java index ad953bc1c..3a64e1483 100644 --- a/src/test/java/io/odpf/firehose/sink/mongodb/MongoSinkFactoryTest.java +++ b/src/test/java/io/odpf/firehose/sink/mongodb/MongoSinkFactoryTest.java @@ -2,7 +2,7 @@ import com.mongodb.MongoClient; import io.odpf.firehose.config.MongoSinkConfig; -import io.odpf.firehose.metrics.Instrumentation; +import io.odpf.firehose.metrics.FirehoseInstrumentation; import org.aeonbits.owner.ConfigFactory; import org.junit.Before; import org.junit.Rule; @@ -27,7 +27,7 @@ public class MongoSinkFactoryTest { private Map configuration; @Mock - private Instrumentation instrumentation; + private FirehoseInstrumentation firehoseInstrumentation; private Method buildMongoClientMethod; @@ -35,7 +35,7 @@ public class MongoSinkFactoryTest { public void setUp() { try { - buildMongoClientMethod = MongoSinkFactory.class.getDeclaredMethod("buildMongoClient", MongoSinkConfig.class, Instrumentation.class); + buildMongoClientMethod = MongoSinkFactory.class.getDeclaredMethod("buildMongoClient", MongoSinkConfig.class, FirehoseInstrumentation.class); } catch (NoSuchMethodException e) { e.printStackTrace(); } @@ -66,7 +66,7 @@ public void shouldCreateMongoClientWithValidAuthentication() { MongoSinkConfig mongoSinkConfig = ConfigFactory.create(MongoSinkConfig.class, configuration); MongoClient mongoClient = null; try { - mongoClient = (MongoClient) buildMongoClientMethod.invoke(mongoSinkFactory, mongoSinkConfig, instrumentation); + mongoClient = (MongoClient) buildMongoClientMethod.invoke(mongoSinkFactory, mongoSinkConfig, firehoseInstrumentation); } catch (IllegalAccessException | InvocationTargetException e) { e.printStackTrace(); } @@ -81,7 +81,7 @@ public void shouldCreateMongoClientWithNoAuthentication() { MongoSinkConfig mongoSinkConfig = ConfigFactory.create(MongoSinkConfig.class, configuration); MongoClient mongoClient = null; try { - mongoClient = (MongoClient) buildMongoClientMethod.invoke(mongoSinkFactory, mongoSinkConfig, instrumentation); + mongoClient = (MongoClient) buildMongoClientMethod.invoke(mongoSinkFactory, mongoSinkConfig, firehoseInstrumentation); } catch (IllegalAccessException | InvocationTargetException e) { e.printStackTrace(); } @@ -99,7 +99,7 @@ public void 
shouldThrowExceptionWhenCreateMongoClientWithNullUsername() { MongoSinkConfig mongoSinkConfig = ConfigFactory.create(MongoSinkConfig.class, configuration); try { - buildMongoClientMethod.invoke(mongoSinkFactory, mongoSinkConfig, instrumentation); + buildMongoClientMethod.invoke(mongoSinkFactory, mongoSinkConfig, firehoseInstrumentation); } catch (InvocationTargetException e) { throw (IllegalArgumentException) e.getTargetException(); } catch (IllegalAccessException e) { @@ -117,7 +117,7 @@ public void shouldThrowExceptionWhenCreateMongoClientWithNullPassword() { thrown.expect(IllegalArgumentException.class); MongoSinkConfig mongoSinkConfig = ConfigFactory.create(MongoSinkConfig.class, configuration); try { - buildMongoClientMethod.invoke(mongoSinkFactory, mongoSinkConfig, instrumentation); + buildMongoClientMethod.invoke(mongoSinkFactory, mongoSinkConfig, firehoseInstrumentation); } catch (InvocationTargetException e) { throw (IllegalArgumentException) e.getTargetException(); } catch (IllegalAccessException e) { @@ -136,7 +136,7 @@ public void shouldThrowExceptionWhenCreateMongoClientWithNullAuthDB() { MongoSinkConfig mongoSinkConfig = ConfigFactory.create(MongoSinkConfig.class, configuration); try { - buildMongoClientMethod.invoke(mongoSinkFactory, mongoSinkConfig, instrumentation); + buildMongoClientMethod.invoke(mongoSinkFactory, mongoSinkConfig, firehoseInstrumentation); } catch (InvocationTargetException e) { throw (IllegalArgumentException) e.getTargetException(); } catch (IllegalAccessException e) { diff --git a/src/test/java/io/odpf/firehose/sink/mongodb/MongoSinkTest.java b/src/test/java/io/odpf/firehose/sink/mongodb/MongoSinkTest.java index ee6701307..bfa5ca0a6 100644 --- a/src/test/java/io/odpf/firehose/sink/mongodb/MongoSinkTest.java +++ b/src/test/java/io/odpf/firehose/sink/mongodb/MongoSinkTest.java @@ -6,7 +6,7 @@ import com.mongodb.client.model.WriteModel; import io.odpf.firehose.config.enums.SinkType; import io.odpf.firehose.message.Message; -import io.odpf.firehose.metrics.Instrumentation; +import io.odpf.firehose.metrics.FirehoseInstrumentation; import io.odpf.firehose.sink.mongodb.client.MongoSinkClient; import io.odpf.firehose.sink.mongodb.request.MongoRequestHandler; import org.bson.BsonDocument; @@ -33,7 +33,7 @@ public class MongoSinkTest { public ExpectedException thrown = ExpectedException.none(); @Mock - private Instrumentation instrumentation; + private FirehoseInstrumentation firehoseInstrumentation; @Mock private MongoRequestHandler mongoRequestHandler; @@ -77,7 +77,7 @@ public void setUp() { @Test public void shouldGetRequestForEachMessageInEsbMessagesList() { - MongoSink mongoSink = new MongoSink(instrumentation, SinkType.MONGODB.name(), mongoRequestHandler, + MongoSink mongoSink = new MongoSink(firehoseInstrumentation, SinkType.MONGODB.name(), mongoRequestHandler, mongoSinkClient); mongoSink.prepare(messages); @@ -87,7 +87,7 @@ public void shouldGetRequestForEachMessageInEsbMessagesList() { @Test public void shouldGetCorrectRequestsForEachMessageInEsbMessagesList() throws IllegalAccessException, NoSuchFieldException { - MongoSink mongoSink = new MongoSink(instrumentation, SinkType.MONGODB.name(), mongoRequestHandler, + MongoSink mongoSink = new MongoSink(firehoseInstrumentation, SinkType.MONGODB.name(), mongoRequestHandler, mongoSinkClient); Field requestsField = MongoSink.class.getDeclaredField("requests"); @@ -105,7 +105,7 @@ public void shouldReturnFailedMessagesWhenBulkRequestFailed() throws NoSuchField BulkWriteError writeError2 = new 
BulkWriteError(11000, "Duplicate Key Error", new BsonDocument(), 1); List writeErrors = Arrays.asList(writeError1, writeError2); - MongoSink mongoSink = new MongoSink(instrumentation, SinkType.MONGODB.name(), mongoRequestHandler, + MongoSink mongoSink = new MongoSink(firehoseInstrumentation, SinkType.MONGODB.name(), mongoRequestHandler, mongoSinkClient); Field messagesField = MongoSink.class.getDeclaredField("messages"); messagesField.setAccessible(true); @@ -121,7 +121,7 @@ public void shouldReturnFailedMessagesWhenBulkRequestFailed() throws NoSuchField @Test public void shouldReturnEmptyListWhenBulRequestSucceeds() throws NoSuchFieldException, IllegalAccessException { List writeErrors = new ArrayList<>(); - MongoSink mongoSink = new MongoSink(instrumentation, SinkType.MONGODB.name(), mongoRequestHandler, + MongoSink mongoSink = new MongoSink(firehoseInstrumentation, SinkType.MONGODB.name(), mongoRequestHandler, mongoSinkClient); Field messagesField = MongoSink.class.getDeclaredField("messages"); diff --git a/src/test/java/io/odpf/firehose/sink/mongodb/client/MongoSinkClientTest.java b/src/test/java/io/odpf/firehose/sink/mongodb/client/MongoSinkClientTest.java index 913640040..00060a299 100644 --- a/src/test/java/io/odpf/firehose/sink/mongodb/client/MongoSinkClientTest.java +++ b/src/test/java/io/odpf/firehose/sink/mongodb/client/MongoSinkClientTest.java @@ -11,7 +11,7 @@ import com.mongodb.client.model.ReplaceOptions; import com.mongodb.client.model.WriteModel; import io.odpf.firehose.config.MongoSinkConfig; -import io.odpf.firehose.metrics.Instrumentation; +import io.odpf.firehose.metrics.FirehoseInstrumentation; import org.bson.BsonDocument; import org.bson.Document; import org.junit.Assert; @@ -36,7 +36,7 @@ public class MongoSinkClientTest { public ExpectedException thrown = ExpectedException.none(); @Mock - private Instrumentation instrumentation; + private FirehoseInstrumentation firehoseInstrumentation; @Mock private MongoSinkConfig mongoSinkConfig; @@ -70,7 +70,7 @@ public void setUp() { @Test public void shouldReturnEmptyArrayListWhenBulkResponseExecutedSuccessfully() { - MongoSinkClient mongoSinkClient = new MongoSinkClient(mongoCollection, instrumentation, + MongoSinkClient mongoSinkClient = new MongoSinkClient(mongoCollection, firehoseInstrumentation, mongoRetryStatusCodeBlacklist, mongoClient, mongoSinkConfig); when(mongoCollection.bulkWrite(request)).thenReturn(new BulkWriteResultMock(true, 1, 1, 0)); List nonBlacklistedErrors = mongoSinkClient.processRequest(request); @@ -83,7 +83,7 @@ public void shouldReturnNonBlacklistedErrorsWhenBulkResponseHasFailuresAndEmptyB BulkWriteError writeError1 = new BulkWriteError(400, "DB not found", new BsonDocument(), 0); BulkWriteError writeError2 = new BulkWriteError(400, "DB not found", new BsonDocument(), 1); List writeErrors = Arrays.asList(writeError1, writeError2); - MongoSinkClient mongoSinkClient = new MongoSinkClient(mongoCollection, instrumentation, + MongoSinkClient mongoSinkClient = new MongoSinkClient(mongoCollection, firehoseInstrumentation, new ArrayList<>(), mongoClient, mongoSinkConfig); when(mongoCollection.bulkWrite(request)).thenThrow(new MongoBulkWriteException(new BulkWriteResultMock(false, 0, 0, 0), writeErrors, null, new ServerAddress())); @@ -97,7 +97,7 @@ public void shouldReturnNonBlacklistedErrorsIfNoneOfTheFailuresBelongToBlacklist BulkWriteError writeError1 = new BulkWriteError(400, "DB not found", new BsonDocument(), 0); BulkWriteError writeError2 = new BulkWriteError(400, "DB not found", new 
BsonDocument(), 1); List writeErrors = Arrays.asList(writeError1, writeError2); - MongoSinkClient mongoSinkClient = new MongoSinkClient(mongoCollection, instrumentation, + MongoSinkClient mongoSinkClient = new MongoSinkClient(mongoCollection, firehoseInstrumentation, mongoRetryStatusCodeBlacklist, mongoClient, mongoSinkConfig); when(mongoCollection.bulkWrite(request)).thenThrow(new MongoBulkWriteException(new BulkWriteResultMock(false, 0, 0, 0), writeErrors, null, new ServerAddress())); @@ -111,14 +111,14 @@ public void shouldReportTelemetryIfTheResponsesBelongToBlacklistStatusCode() { BulkWriteError writeError1 = new BulkWriteError(11000, "Duplicate Key Error", new BsonDocument(), 0); BulkWriteError writeError2 = new BulkWriteError(11000, "Duplicate Key Error", new BsonDocument(), 1); List writeErrors = Arrays.asList(writeError1, writeError2); - MongoSinkClient mongoSinkClient = new MongoSinkClient(mongoCollection, instrumentation, + MongoSinkClient mongoSinkClient = new MongoSinkClient(mongoCollection, firehoseInstrumentation, mongoRetryStatusCodeBlacklist, mongoClient, mongoSinkConfig); when(mongoCollection.bulkWrite(request)).thenThrow(new MongoBulkWriteException(new BulkWriteResultMock(false, 0, 0, 0), writeErrors, null, new ServerAddress())); mongoSinkClient.processRequest(request); - verify(instrumentation, times(2)).logWarn("Non-retriable error due to response status: {} is under blacklisted status code", 11000); - verify(instrumentation, times(2)).logInfo("Message dropped because of status code: 11000"); - verify(instrumentation, times(2)).incrementCounter("firehose_sink_messages_drop_total", "cause=Duplicate Key Error"); + verify(firehoseInstrumentation, times(2)).logWarn("Non-retriable error due to response status: {} is under blacklisted status code", 11000); + verify(firehoseInstrumentation, times(2)).logInfo("Message dropped because of status code: 11000"); + verify(firehoseInstrumentation, times(2)).incrementCounter("firehose_sink_messages_drop_total", "cause=Duplicate Key Error"); } @Test @@ -126,14 +126,14 @@ public void shouldReportTelemetryIfSomeOfTheFailuresDontBelongToBlacklist() { BulkWriteError writeError1 = new BulkWriteError(400, "Duplicate Key Error", new BsonDocument(), 0); BulkWriteError writeError2 = new BulkWriteError(11000, "Duplicate Key Error", new BsonDocument(), 1); List writeErrors = Arrays.asList(writeError1, writeError2); - MongoSinkClient mongoSinkClient = new MongoSinkClient(mongoCollection, instrumentation, + MongoSinkClient mongoSinkClient = new MongoSinkClient(mongoCollection, firehoseInstrumentation, mongoRetryStatusCodeBlacklist, mongoClient, mongoSinkConfig); when(mongoCollection.bulkWrite(request)).thenThrow(new MongoBulkWriteException(new BulkWriteResultMock(false, 0, 0, 0), writeErrors, null, new ServerAddress())); mongoSinkClient.processRequest(request); - verify(instrumentation, times(1)).logWarn("Non-retriable error due to response status: {} is under blacklisted status code", 11000); - verify(instrumentation, times(1)).logInfo("Message dropped because of status code: 11000"); - verify(instrumentation, times(1)).incrementCounter("firehose_sink_messages_drop_total", "cause=Duplicate Key Error"); + verify(firehoseInstrumentation, times(1)).logWarn("Non-retriable error due to response status: {} is under blacklisted status code", 11000); + verify(firehoseInstrumentation, times(1)).logInfo("Message dropped because of status code: 11000"); + verify(firehoseInstrumentation, times(1)).incrementCounter("firehose_sink_messages_drop_total", 
"cause=Duplicate Key Error"); } @Test @@ -143,7 +143,7 @@ public void shouldReturnNonBlacklistedErrorsIfSomeOfTheFailuresDontBelongToBlack BulkWriteError writeError3 = new BulkWriteError(502, "Collection not found", new BsonDocument(), 2); List writeErrors = Arrays.asList(writeError1, writeError2, writeError3); - MongoSinkClient mongoSinkClient = new MongoSinkClient(mongoCollection, instrumentation, + MongoSinkClient mongoSinkClient = new MongoSinkClient(mongoCollection, firehoseInstrumentation, mongoRetryStatusCodeBlacklist, mongoClient, mongoSinkConfig); request.add(new ReplaceOneModel<>( @@ -156,7 +156,7 @@ public void shouldReturnNonBlacklistedErrorsIfSomeOfTheFailuresDontBelongToBlack List nonBlacklistedErrors = mongoSinkClient.processRequest(request); - verify(instrumentation, times(2)).incrementCounter(any(String.class), any(String.class)); + verify(firehoseInstrumentation, times(2)).incrementCounter(any(String.class), any(String.class)); Assert.assertEquals(1, nonBlacklistedErrors.size()); Assert.assertEquals(writeErrors.get(1), nonBlacklistedErrors.get(0)); @@ -167,108 +167,108 @@ public void shouldLogBulkRequestFailedWhenBulkResponsesHasFailures() { BulkWriteError writeError1 = new BulkWriteError(11000, "Duplicate Key Error", new BsonDocument(), 0); BulkWriteError writeError2 = new BulkWriteError(11000, "Duplicate Key Error", new BsonDocument(), 1); List writeErrors = Arrays.asList(writeError1, writeError2); - MongoSinkClient mongoSinkClient = new MongoSinkClient(mongoCollection, instrumentation, + MongoSinkClient mongoSinkClient = new MongoSinkClient(mongoCollection, firehoseInstrumentation, mongoRetryStatusCodeBlacklist, mongoClient, mongoSinkConfig); when(mongoCollection.bulkWrite(request)).thenThrow(new MongoBulkWriteException(new BulkWriteResultMock(false, 0, 0, 0), writeErrors, null, new ServerAddress())); mongoSinkClient.processRequest(request); - verify(instrumentation, times(1)).logWarn("Bulk request failed count: {}", 2); + verify(firehoseInstrumentation, times(1)).logWarn("Bulk request failed count: {}", 2); } @Test public void shouldNotLogBulkRequestFailedWhenBulkResponsesHasNoFailures() { List writeErrors = new ArrayList<>(); - MongoSinkClient mongoSinkClient = new MongoSinkClient(mongoCollection, instrumentation, + MongoSinkClient mongoSinkClient = new MongoSinkClient(mongoCollection, firehoseInstrumentation, mongoRetryStatusCodeBlacklist, mongoClient, mongoSinkConfig); when(mongoCollection.bulkWrite(request)).thenThrow(new MongoBulkWriteException(new BulkWriteResultMock(false, 0, 0, 0), writeErrors, null, new ServerAddress())); mongoSinkClient.processRequest(request); - verify(instrumentation, times(0)).logWarn("Bulk request failed count: {}", 2); + verify(firehoseInstrumentation, times(0)).logWarn("Bulk request failed count: {}", 2); } @Test public void shouldLogBulkRequestFailedWhenPrimaryKeyNotFoundForAllMessages() { - MongoSinkClient mongoSinkClient = new MongoSinkClient(mongoCollection, instrumentation, + MongoSinkClient mongoSinkClient = new MongoSinkClient(mongoCollection, firehoseInstrumentation, mongoRetryStatusCodeBlacklist, mongoClient, mongoSinkConfig); when(mongoSinkConfig.isSinkMongoModeUpdateOnlyEnable()).thenReturn(true); when(mongoCollection.bulkWrite(request)).thenReturn(new BulkWriteResultMock(true, 0, 0, 0)); mongoSinkClient.processRequest(request); - verify(instrumentation, times(1)).logWarn("Bulk request failed"); - verify(instrumentation, times(1)).logWarn("Bulk request failures count: {}", 2); - verify(instrumentation, 
times(1)).logWarn("Some Messages were dropped because their Primary Key values had no matches"); + verify(firehoseInstrumentation, times(1)).logWarn("Bulk request failed"); + verify(firehoseInstrumentation, times(1)).logWarn("Bulk request failures count: {}", 2); + verify(firehoseInstrumentation, times(1)).logWarn("Some Messages were dropped because their Primary Key values had no matches"); } @Test public void shouldLogBulkRequestPartiallySucceededWhenPrimaryKeyNotFoundForSomeMessages() { - MongoSinkClient mongoSinkClient = new MongoSinkClient(mongoCollection, instrumentation, + MongoSinkClient mongoSinkClient = new MongoSinkClient(mongoCollection, firehoseInstrumentation, mongoRetryStatusCodeBlacklist, mongoClient, mongoSinkConfig); when(mongoSinkConfig.isSinkMongoModeUpdateOnlyEnable()).thenReturn(true); when(mongoCollection.bulkWrite(request)).thenReturn(new BulkWriteResultMock(true, 0, 1, 0)); mongoSinkClient.processRequest(request); - verify(instrumentation, times(1)).logWarn("Bulk request partially succeeded"); - verify(instrumentation, times(1)).logWarn("Bulk request failures count: {}", 1); - verify(instrumentation, times(1)).logWarn("Some Messages were dropped because their Primary Key values had no matches"); + verify(firehoseInstrumentation, times(1)).logWarn("Bulk request partially succeeded"); + verify(firehoseInstrumentation, times(1)).logWarn("Bulk request failures count: {}", 1); + verify(firehoseInstrumentation, times(1)).logWarn("Some Messages were dropped because their Primary Key values had no matches"); } @Test public void shouldLogBulkRequestsNotAcknowledgedWhenNoAcknowledgementReceived() { - MongoSinkClient mongoSinkClient = new MongoSinkClient(mongoCollection, instrumentation, + MongoSinkClient mongoSinkClient = new MongoSinkClient(mongoCollection, firehoseInstrumentation, mongoRetryStatusCodeBlacklist, mongoClient, mongoSinkConfig); when(mongoSinkConfig.isSinkMongoModeUpdateOnlyEnable()).thenReturn(true); when(mongoCollection.bulkWrite(request)).thenReturn(new BulkWriteResultMock(false, 0, 1, 0)); mongoSinkClient.processRequest(request); - verify(instrumentation, times(1)).logWarn("Bulk Write operation was not acknowledged"); + verify(firehoseInstrumentation, times(1)).logWarn("Bulk Write operation was not acknowledged"); } @Test public void shouldLogBulkRequestsSucceededWhenNoFailuresForUpdateOnly() { - MongoSinkClient mongoSinkClient = new MongoSinkClient(mongoCollection, instrumentation, + MongoSinkClient mongoSinkClient = new MongoSinkClient(mongoCollection, firehoseInstrumentation, mongoRetryStatusCodeBlacklist, mongoClient, mongoSinkConfig); when(mongoSinkConfig.isSinkMongoModeUpdateOnlyEnable()).thenReturn(true); when(mongoCollection.bulkWrite(request)).thenReturn(new BulkWriteResultMock(true, 0, 2, 0)); mongoSinkClient.processRequest(request); - verify(instrumentation, times(1)).logInfo("Bulk request succeeded"); - verify(instrumentation, times(1)).logInfo("Bulk Write operation was successfully acknowledged"); - verify(instrumentation, times(1)).logInfo( + verify(firehoseInstrumentation, times(1)).logInfo("Bulk request succeeded"); + verify(firehoseInstrumentation, times(1)).logInfo("Bulk Write operation was successfully acknowledged"); + verify(firehoseInstrumentation, times(1)).logInfo( "Inserted Count = {}. Matched Count = {}. Deleted Count = {}. Updated Count = {}. 
Total Modified Count = {}", 0, 2, 0, 2, 2); } @Test public void shouldLogBulkRequestsSucceededWhenNoFailuresForInsertOnly() { - MongoSinkClient mongoSinkClient = new MongoSinkClient(mongoCollection, instrumentation, + MongoSinkClient mongoSinkClient = new MongoSinkClient(mongoCollection, firehoseInstrumentation, mongoRetryStatusCodeBlacklist, mongoClient, mongoSinkConfig); when(mongoCollection.bulkWrite(request)).thenReturn(new BulkWriteResultMock(true, 2, 0, 0)); mongoSinkClient.processRequest(request); - verify(instrumentation, times(1)).logInfo("Bulk request succeeded"); - verify(instrumentation, times(1)).logInfo("Bulk Write operation was successfully acknowledged"); + verify(firehoseInstrumentation, times(1)).logInfo("Bulk request succeeded"); + verify(firehoseInstrumentation, times(1)).logInfo("Bulk Write operation was successfully acknowledged"); - verify(instrumentation, times(1)).logInfo( + verify(firehoseInstrumentation, times(1)).logInfo( "Inserted Count = {}. Matched Count = {}. Deleted Count = {}. Updated Count = {}. Total Modified Count = {}", 2, 0, 0, 0, 2); } @Test public void shouldLogBulkRequestsSucceededWhenNoFailuresForBothUpdateAndInsert() { - MongoSinkClient mongoSinkClient = new MongoSinkClient(mongoCollection, instrumentation, + MongoSinkClient mongoSinkClient = new MongoSinkClient(mongoCollection, firehoseInstrumentation, mongoRetryStatusCodeBlacklist, mongoClient, mongoSinkConfig); when(mongoCollection.bulkWrite(request)).thenReturn(new BulkWriteResultMock(true, 1, 1, 0)); mongoSinkClient.processRequest(request); - verify(instrumentation, times(1)).logInfo("Bulk request succeeded"); - verify(instrumentation, times(1)).logInfo("Bulk Write operation was successfully acknowledged"); - verify(instrumentation, times(1)).logInfo( + verify(firehoseInstrumentation, times(1)).logInfo("Bulk request succeeded"); + verify(firehoseInstrumentation, times(1)).logInfo("Bulk Write operation was successfully acknowledged"); + verify(firehoseInstrumentation, times(1)).logInfo( "Inserted Count = {}. Matched Count = {}. Deleted Count = {}. Updated Count = {}. 
Total Modified Count = {}", 1, 1, 0, 1, 2); @@ -277,55 +277,55 @@ public void shouldLogBulkRequestsSucceededWhenNoFailuresForBothUpdateAndInsert() @Test public void shouldIncrementFailureCounterTagWhenPrimaryKeyNotFoundInUpdateOnlyMode() { - MongoSinkClient mongoSinkClient = new MongoSinkClient(mongoCollection, instrumentation, + MongoSinkClient mongoSinkClient = new MongoSinkClient(mongoCollection, firehoseInstrumentation, mongoRetryStatusCodeBlacklist, mongoClient, mongoSinkConfig); when(mongoSinkConfig.isSinkMongoModeUpdateOnlyEnable()).thenReturn(true); when(mongoCollection.bulkWrite(request)).thenReturn(new BulkWriteResultMock(true, 0, 1, 0)); mongoSinkClient.processRequest(request); - verify(instrumentation, times(1)).incrementCounter(SINK_MESSAGES_DROP_TOTAL, "cause=Primary Key value not found"); + verify(firehoseInstrumentation, times(1)).incrementCounter(SINK_MESSAGES_DROP_TOTAL, "cause=Primary Key value not found"); } @Test public void shouldIncrementInsertedCounterTagOnSuccessfulInsertion() { - MongoSinkClient mongoSinkClient = new MongoSinkClient(mongoCollection, instrumentation, + MongoSinkClient mongoSinkClient = new MongoSinkClient(mongoCollection, firehoseInstrumentation, mongoRetryStatusCodeBlacklist, mongoClient, mongoSinkConfig); when(mongoCollection.bulkWrite(request)).thenReturn(new BulkWriteResultMock(true, 3, 0, 0)); mongoSinkClient.processRequest(request); - verify(instrumentation, times(3)).incrementCounter(SINK_MONGO_INSERTED_TOTAL); - verify(instrumentation, times(3)).incrementCounter(SINK_MONGO_MODIFIED_TOTAL); + verify(firehoseInstrumentation, times(3)).incrementCounter(SINK_MONGO_INSERTED_TOTAL); + verify(firehoseInstrumentation, times(3)).incrementCounter(SINK_MONGO_MODIFIED_TOTAL); } @Test public void shouldIncrementUpdatedCounterTagOnSuccessfulUpdation() { - MongoSinkClient mongoSinkClient = new MongoSinkClient(mongoCollection, instrumentation, + MongoSinkClient mongoSinkClient = new MongoSinkClient(mongoCollection, firehoseInstrumentation, mongoRetryStatusCodeBlacklist, mongoClient, mongoSinkConfig); when(mongoSinkConfig.isSinkMongoModeUpdateOnlyEnable()).thenReturn(true); when(mongoCollection.bulkWrite(request)).thenReturn(new BulkWriteResultMock(true, 0, 3, 0)); mongoSinkClient.processRequest(request); - verify(instrumentation, times(3)).incrementCounter(SINK_MONGO_UPDATED_TOTAL); - verify(instrumentation, times(3)).incrementCounter(SINK_MONGO_MODIFIED_TOTAL); + verify(firehoseInstrumentation, times(3)).incrementCounter(SINK_MONGO_UPDATED_TOTAL); + verify(firehoseInstrumentation, times(3)).incrementCounter(SINK_MONGO_MODIFIED_TOTAL); } @Test public void shouldIncrementInsertedCounterTagOnSuccessfulInsertionInUpsertMode() { - MongoSinkClient mongoSinkClient = new MongoSinkClient(mongoCollection, instrumentation, + MongoSinkClient mongoSinkClient = new MongoSinkClient(mongoCollection, firehoseInstrumentation, mongoRetryStatusCodeBlacklist, mongoClient, mongoSinkConfig); when(mongoSinkConfig.isSinkMongoModeUpdateOnlyEnable()).thenReturn(true); when(mongoCollection.bulkWrite(request)).thenReturn(new BulkWriteResultMock(true, 0, 0, 3)); mongoSinkClient.processRequest(request); - verify(instrumentation, times(3)).incrementCounter(SINK_MONGO_INSERTED_TOTAL); - verify(instrumentation, times(3)).incrementCounter(SINK_MONGO_MODIFIED_TOTAL); + verify(firehoseInstrumentation, times(3)).incrementCounter(SINK_MONGO_INSERTED_TOTAL); + verify(firehoseInstrumentation, times(3)).incrementCounter(SINK_MONGO_MODIFIED_TOTAL); } diff --git 
a/src/test/java/io/odpf/firehose/sink/mongodb/client/MongoSinkClientUtilTest.java b/src/test/java/io/odpf/firehose/sink/mongodb/client/MongoSinkClientUtilTest.java index 005a55a4a..70e72dd01 100644 --- a/src/test/java/io/odpf/firehose/sink/mongodb/client/MongoSinkClientUtilTest.java +++ b/src/test/java/io/odpf/firehose/sink/mongodb/client/MongoSinkClientUtilTest.java @@ -2,7 +2,7 @@ import com.mongodb.MongoClient; import com.mongodb.client.MongoDatabase; -import io.odpf.firehose.metrics.Instrumentation; +import io.odpf.firehose.metrics.FirehoseInstrumentation; import org.junit.Rule; import org.junit.Test; import org.junit.rules.ExpectedException; @@ -18,7 +18,7 @@ public class MongoSinkClientUtilTest { public ExpectedException thrown = ExpectedException.none(); @Mock - private Instrumentation instrumentation; + private FirehoseInstrumentation firehoseInstrumentation; @Mock private MongoClient mongoClient; @@ -83,12 +83,12 @@ public void shouldThrowExceptionWhenStatusCodesHaveInvalidCharacters() { @Test public void shouldThrowExceptionWhenNullDatabaseName() { thrown.expect(IllegalArgumentException.class); - MongoSinkClientUtil.checkDatabaseExists(null, mongoClient, instrumentation); + MongoSinkClientUtil.checkDatabaseExists(null, mongoClient, firehoseInstrumentation); } @Test public void shouldThrowExceptionWhenNullCollectionName() { thrown.expect(IllegalArgumentException.class); - MongoSinkClientUtil.checkCollectionExists(null, mongoDatabase, instrumentation); + MongoSinkClientUtil.checkCollectionExists(null, mongoDatabase, firehoseInstrumentation); } } diff --git a/src/test/java/io/odpf/firehose/sink/mongodb/request/MongoRequestHandlerFactoryTest.java b/src/test/java/io/odpf/firehose/sink/mongodb/request/MongoRequestHandlerFactoryTest.java index ae4a77b45..894bc2d1b 100644 --- a/src/test/java/io/odpf/firehose/sink/mongodb/request/MongoRequestHandlerFactoryTest.java +++ b/src/test/java/io/odpf/firehose/sink/mongodb/request/MongoRequestHandlerFactoryTest.java @@ -3,7 +3,7 @@ import io.odpf.firehose.config.MongoSinkConfig; import io.odpf.firehose.config.enums.MongoSinkMessageType; import io.odpf.firehose.config.enums.MongoSinkRequestType; -import io.odpf.firehose.metrics.Instrumentation; +import io.odpf.firehose.metrics.FirehoseInstrumentation; import io.odpf.firehose.serializer.MessageToJson; import org.junit.Before; import org.junit.Rule; @@ -26,7 +26,7 @@ public class MongoRequestHandlerFactoryTest { private MongoSinkConfig mongoSinkConfig; @Mock - private Instrumentation instrumentation; + private FirehoseInstrumentation firehoseInstrumentation; private MessageToJson jsonSerializer; @@ -40,7 +40,7 @@ public void shouldReturnMongoRequestHandler() { String primaryKey = "customer_id"; when(mongoSinkConfig.isSinkMongoModeUpdateOnlyEnable()).thenReturn(new Random().nextBoolean()); - MongoRequestHandlerFactory mongoRequestHandlerFactory = new MongoRequestHandlerFactory(mongoSinkConfig, instrumentation, primaryKey, + MongoRequestHandlerFactory mongoRequestHandlerFactory = new MongoRequestHandlerFactory(mongoSinkConfig, firehoseInstrumentation, primaryKey, MongoSinkMessageType.JSON, jsonSerializer); when(mongoSinkConfig.getKafkaRecordParserMode()).thenReturn("message"); MongoRequestHandler requestHandler = mongoRequestHandlerFactory.getRequestHandler(); @@ -53,12 +53,12 @@ public void shouldReturnUpsertRequestHandler() { String primaryKey = "customer_id"; when(mongoSinkConfig.isSinkMongoModeUpdateOnlyEnable()).thenReturn(false); - MongoRequestHandlerFactory mongoRequestHandlerFactory = new 
MongoRequestHandlerFactory(mongoSinkConfig, instrumentation, primaryKey, + MongoRequestHandlerFactory mongoRequestHandlerFactory = new MongoRequestHandlerFactory(mongoSinkConfig, firehoseInstrumentation, primaryKey, MongoSinkMessageType.JSON, jsonSerializer); when(mongoSinkConfig.getKafkaRecordParserMode()).thenReturn("message"); MongoRequestHandler requestHandler = mongoRequestHandlerFactory.getRequestHandler(); - verify(instrumentation, times(1)).logInfo("Mongo request mode: {}", MongoSinkRequestType.UPSERT); + verify(firehoseInstrumentation, times(1)).logInfo("Mongo request mode: {}", MongoSinkRequestType.UPSERT); assertEquals(MongoUpsertRequestHandler.class, requestHandler.getClass()); } @@ -67,12 +67,12 @@ public void shouldReturnUpdateRequestHandler() { String primaryKey = "customer_id"; when(mongoSinkConfig.isSinkMongoModeUpdateOnlyEnable()).thenReturn(true); - MongoRequestHandlerFactory mongoRequestHandlerFactory = new MongoRequestHandlerFactory(mongoSinkConfig, instrumentation, primaryKey, + MongoRequestHandlerFactory mongoRequestHandlerFactory = new MongoRequestHandlerFactory(mongoSinkConfig, firehoseInstrumentation, primaryKey, MongoSinkMessageType.JSON, jsonSerializer); when(mongoSinkConfig.getKafkaRecordParserMode()).thenReturn("message"); MongoRequestHandler requestHandler = mongoRequestHandlerFactory.getRequestHandler(); - verify(instrumentation, times(1)).logInfo("Mongo request mode: {}", MongoSinkRequestType.UPDATE_ONLY); + verify(firehoseInstrumentation, times(1)).logInfo("Mongo request mode: {}", MongoSinkRequestType.UPDATE_ONLY); assertEquals(MongoUpdateRequestHandler.class, requestHandler.getClass()); } @@ -80,7 +80,7 @@ public void shouldReturnUpdateRequestHandler() { public void shouldThrowExceptionWhenInvalidRecordParserMode() { String primaryKey = "customer_id"; - MongoRequestHandlerFactory mongoRequestHandlerFactory = new MongoRequestHandlerFactory(mongoSinkConfig, instrumentation, primaryKey, + MongoRequestHandlerFactory mongoRequestHandlerFactory = new MongoRequestHandlerFactory(mongoSinkConfig, firehoseInstrumentation, primaryKey, MongoSinkMessageType.JSON, jsonSerializer); when(mongoSinkConfig.getKafkaRecordParserMode()).thenReturn("xyz"); @@ -94,12 +94,12 @@ public void shouldCreateUpsertRequestHandlerWhenPrimaryKeyNotSpecified() { String primaryKey = null; when(mongoSinkConfig.isSinkMongoModeUpdateOnlyEnable()).thenReturn(false); - MongoRequestHandlerFactory mongoRequestHandlerFactory = new MongoRequestHandlerFactory(mongoSinkConfig, instrumentation, primaryKey, + MongoRequestHandlerFactory mongoRequestHandlerFactory = new MongoRequestHandlerFactory(mongoSinkConfig, firehoseInstrumentation, primaryKey, MongoSinkMessageType.JSON, jsonSerializer); when(mongoSinkConfig.getKafkaRecordParserMode()).thenReturn("message"); MongoRequestHandler requestHandler = mongoRequestHandlerFactory.getRequestHandler(); - verify(instrumentation, times(1)).logInfo("Mongo request mode: {}", MongoSinkRequestType.UPSERT); + verify(firehoseInstrumentation, times(1)).logInfo("Mongo request mode: {}", MongoSinkRequestType.UPSERT); assertEquals(MongoUpsertRequestHandler.class, requestHandler.getClass()); } @@ -107,7 +107,7 @@ public void shouldCreateUpsertRequestHandlerWhenPrimaryKeyNotSpecified() { public void shouldThrowExceptionWhenCreateUpdateRequestHandlerWhenPrimaryKeyNotSpecified() { String primaryKey = null; when(mongoSinkConfig.isSinkMongoModeUpdateOnlyEnable()).thenReturn(true); - MongoRequestHandlerFactory mongoRequestHandlerFactory = new 
MongoRequestHandlerFactory(mongoSinkConfig, instrumentation, primaryKey, + MongoRequestHandlerFactory mongoRequestHandlerFactory = new MongoRequestHandlerFactory(mongoSinkConfig, firehoseInstrumentation, primaryKey, MongoSinkMessageType.JSON, jsonSerializer); when(mongoSinkConfig.getKafkaRecordParserMode()).thenReturn("message"); diff --git a/src/test/java/io/odpf/firehose/sink/mongodb/request/MongoUpdateRequestHandlerTest.java b/src/test/java/io/odpf/firehose/sink/mongodb/request/MongoUpdateRequestHandlerTest.java index 287bea073..f77004ead 100644 --- a/src/test/java/io/odpf/firehose/sink/mongodb/request/MongoUpdateRequestHandlerTest.java +++ b/src/test/java/io/odpf/firehose/sink/mongodb/request/MongoUpdateRequestHandlerTest.java @@ -1,5 +1,6 @@ package io.odpf.firehose.sink.mongodb.request; +import io.odpf.stencil.StencilClientFactory; import io.odpf.stencil.client.StencilClient; import com.google.protobuf.InvalidProtocolBufferException; @@ -17,14 +18,11 @@ import org.junit.Rule; import org.junit.Test; import org.junit.rules.ExpectedException; -import org.mockito.Mock; -import org.mockito.Mockito; import java.util.Base64; import java.util.stream.Collectors; import static org.junit.Assert.*; -import static org.mockito.Mockito.when; import static org.mockito.MockitoAnnotations.initMocks; public class MongoUpdateRequestHandlerTest { @@ -32,8 +30,7 @@ public class MongoUpdateRequestHandlerTest { @Rule public ExpectedException thrown = ExpectedException.none(); - @Mock - private StencilClient stencilClient; + private final StencilClient stencilClient = StencilClientFactory.getClient(); private MessageToJson jsonSerializer; private Message messageWithJSON; @@ -49,10 +46,7 @@ public void setUp() throws InvalidProtocolBufferException { logMessage = "CgYIyOm+xgUSBgiE6r7GBRgNIICAgIDA9/y0LigCMAM\u003d"; messageWithProto = new Message(null, Base64.getDecoder().decode(logMessage.getBytes()), "sample-topic", 0, 100); - when(stencilClient.parse(Mockito.anyString(), Mockito.any())).thenCallRealMethod(); - when(stencilClient.getParser(Mockito.anyString())).thenCallRealMethod(); String protoClassName = TestAggregatedSupplyMessage.class.getName(); - when(stencilClient.get(protoClassName)).thenReturn(TestAggregatedSupplyMessage.getDescriptor()); jsonSerializer = new MessageToJson(stencilClient.getParser(protoClassName), true, false); } diff --git a/src/test/java/io/odpf/firehose/sink/mongodb/request/MongoUpsertRequestHandlerTest.java b/src/test/java/io/odpf/firehose/sink/mongodb/request/MongoUpsertRequestHandlerTest.java index adc70e1db..13d9e9c86 100644 --- a/src/test/java/io/odpf/firehose/sink/mongodb/request/MongoUpsertRequestHandlerTest.java +++ b/src/test/java/io/odpf/firehose/sink/mongodb/request/MongoUpsertRequestHandlerTest.java @@ -1,5 +1,6 @@ package io.odpf.firehose.sink.mongodb.request; +import io.odpf.stencil.StencilClientFactory; import io.odpf.stencil.client.StencilClient; import com.google.protobuf.InvalidProtocolBufferException; import com.mongodb.BasicDBObject; @@ -17,14 +18,11 @@ import org.junit.Rule; import org.junit.Test; import org.junit.rules.ExpectedException; -import org.mockito.Mock; -import org.mockito.Mockito; import java.util.Base64; import java.util.stream.Collectors; import static org.junit.Assert.*; -import static org.mockito.Mockito.when; import static org.mockito.MockitoAnnotations.initMocks; public class MongoUpsertRequestHandlerTest { @@ -32,8 +30,7 @@ public class MongoUpsertRequestHandlerTest { @Rule public ExpectedException thrown = ExpectedException.none(); - @Mock - 
private StencilClient stencilClient; + private final StencilClient stencilClient = StencilClientFactory.getClient(); private MessageToJson jsonSerializer; private Message messageWithJSON; @@ -49,10 +46,7 @@ public void setUp() throws InvalidProtocolBufferException { logMessage = "CgYIyOm+xgUSBgiE6r7GBRgNIICAgIDA9/y0LigCMAM\u003d"; messageWithProto = new Message(null, Base64.getDecoder().decode(logMessage.getBytes()), "sample-topic", 0, 100); - when(stencilClient.parse(Mockito.anyString(), Mockito.any())).thenCallRealMethod(); - when(stencilClient.getParser(Mockito.anyString())).thenCallRealMethod(); String protoClassName = TestAggregatedSupplyMessage.class.getName(); - when(stencilClient.get(protoClassName)).thenReturn(TestAggregatedSupplyMessage.getDescriptor()); jsonSerializer = new MessageToJson(stencilClient.getParser(protoClassName), true, false); } diff --git a/src/test/java/io/odpf/firehose/sink/mongodb/util/MongoSinkFactoryUtilTest.java b/src/test/java/io/odpf/firehose/sink/mongodb/util/MongoSinkFactoryUtilTest.java index 31a87f1fb..d3374bd1d 100644 --- a/src/test/java/io/odpf/firehose/sink/mongodb/util/MongoSinkFactoryUtilTest.java +++ b/src/test/java/io/odpf/firehose/sink/mongodb/util/MongoSinkFactoryUtilTest.java @@ -1,7 +1,7 @@ package io.odpf.firehose.sink.mongodb.util; import com.mongodb.ServerAddress; -import io.odpf.firehose.metrics.Instrumentation; +import io.odpf.firehose.metrics.FirehoseInstrumentation; import org.junit.Before; import org.junit.Rule; import org.junit.Test; @@ -19,7 +19,7 @@ public class MongoSinkFactoryUtilTest { public ExpectedException thrown = ExpectedException.none(); @Mock - private Instrumentation instrumentation; + private FirehoseInstrumentation firehoseInstrumentation; @Before public void setUp() { @@ -31,7 +31,7 @@ public void shouldThrowIllegalArgumentExceptionForEmptyMongoConnectionURLs() { thrown.expect(IllegalArgumentException.class); thrown.expectMessage("SINK_MONGO_CONNECTION_URLS is empty or null"); - MongoSinkFactoryUtil.getServerAddresses("", instrumentation); + MongoSinkFactoryUtil.getServerAddresses("", firehoseInstrumentation); } @@ -39,7 +39,7 @@ public void shouldThrowIllegalArgumentExceptionForEmptyMongoConnectionURLs() { public void shouldThrowIllegalArgumentExceptionWhenServerPortInvalid() { String mongoConnectionURLs = "localhost:qfb"; thrown.expect(IllegalArgumentException.class); - MongoSinkFactoryUtil.getServerAddresses(mongoConnectionURLs, instrumentation); + MongoSinkFactoryUtil.getServerAddresses(mongoConnectionURLs, firehoseInstrumentation); } @Test @@ -47,14 +47,14 @@ public void shouldThrowIllegalArgumentExceptionForNullMongoConnectionURLs() { thrown.expect(IllegalArgumentException.class); thrown.expectMessage("SINK_MONGO_CONNECTION_URLS is empty or null"); - MongoSinkFactoryUtil.getServerAddresses(null, instrumentation); + MongoSinkFactoryUtil.getServerAddresses(null, firehoseInstrumentation); } @Test public void shouldThrowIllegalArgumentExceptionForEmptyHost() { String mongoConnectionURLs = ":1000"; thrown.expect(IllegalArgumentException.class); - MongoSinkFactoryUtil.getServerAddresses(mongoConnectionURLs, instrumentation); + MongoSinkFactoryUtil.getServerAddresses(mongoConnectionURLs, firehoseInstrumentation); } @Test @@ -62,13 +62,13 @@ public void shouldThrowIllegalArgumentExceptionForEmptyPort() { String mongoConnectionURLs = "localhost:"; thrown.expect(IllegalArgumentException.class); - MongoSinkFactoryUtil.getServerAddresses(mongoConnectionURLs, instrumentation); + 
MongoSinkFactoryUtil.getServerAddresses(mongoConnectionURLs, firehoseInstrumentation); } @Test public void shouldGetServerAddressesForValidMongoConnectionURLs() { String mongoConnectionURLs = "localhost_1:1000,localhost_2:1000"; - List serverAddresses = MongoSinkFactoryUtil.getServerAddresses(mongoConnectionURLs, instrumentation); + List serverAddresses = MongoSinkFactoryUtil.getServerAddresses(mongoConnectionURLs, firehoseInstrumentation); assertEquals("localhost_1", serverAddresses.get(0).getHost()); assertEquals(1000, serverAddresses.get(0).getPort()); @@ -79,7 +79,7 @@ public void shouldGetServerAddressesForValidMongoConnectionURLs() { @Test public void shouldGetServerAddressesForValidMongoConnectionURLsWithSpacesInBetween() { String mongoConnectionURLs = " localhost_1: 1000, localhost_2:1000"; - List serverAddresses = MongoSinkFactoryUtil.getServerAddresses(mongoConnectionURLs, instrumentation); + List serverAddresses = MongoSinkFactoryUtil.getServerAddresses(mongoConnectionURLs, firehoseInstrumentation); assertEquals("localhost_1", serverAddresses.get(0).getHost()); assertEquals(1000, serverAddresses.get(0).getPort()); @@ -91,7 +91,7 @@ public void shouldGetServerAddressesForValidMongoConnectionURLsWithSpacesInBetwe @Test public void shouldGetServerAddressForIPInMongoConnectionURLs() { String mongoConnectionURLs = "172.28.32.156:1000"; - List serverAddresses = MongoSinkFactoryUtil.getServerAddresses(mongoConnectionURLs, instrumentation); + List serverAddresses = MongoSinkFactoryUtil.getServerAddresses(mongoConnectionURLs, firehoseInstrumentation); assertEquals("172.28.32.156", serverAddresses.get(0).getHost()); assertEquals(1000, serverAddresses.get(0).getPort()); @@ -103,6 +103,6 @@ public void shouldThrowExceptionIfHostAndPortNotProvidedProperly() { thrown.expect(IllegalArgumentException.class); thrown.expectMessage("SINK_MONGO_CONNECTION_URLS should contain host and port both"); - MongoSinkFactoryUtil.getServerAddresses(mongoConnectionURLs, instrumentation); + MongoSinkFactoryUtil.getServerAddresses(mongoConnectionURLs, firehoseInstrumentation); } } diff --git a/src/test/java/io/odpf/firehose/sink/prometheus/PromSinkFactoryTest.java b/src/test/java/io/odpf/firehose/sink/prometheus/PromSinkFactoryTest.java index f67be69ed..5be10e72a 100644 --- a/src/test/java/io/odpf/firehose/sink/prometheus/PromSinkFactoryTest.java +++ b/src/test/java/io/odpf/firehose/sink/prometheus/PromSinkFactoryTest.java @@ -1,14 +1,14 @@ package io.odpf.firehose.sink.prometheus; +import io.odpf.depot.metrics.StatsDReporter; import io.odpf.firehose.exception.DeserializerException; -import io.odpf.firehose.metrics.StatsDReporter; import io.odpf.firehose.sink.AbstractSink; import io.odpf.stencil.client.StencilClient; import org.junit.Test; import org.junit.runner.RunWith; import org.mockito.Mock; -import org.mockito.runners.MockitoJUnitRunner; +import org.mockito.junit.MockitoJUnitRunner; import java.util.HashMap; import java.util.Map; diff --git a/src/test/java/io/odpf/firehose/sink/prometheus/PromSinkTest.java b/src/test/java/io/odpf/firehose/sink/prometheus/PromSinkTest.java index 9f19a050b..017d94dd2 100644 --- a/src/test/java/io/odpf/firehose/sink/prometheus/PromSinkTest.java +++ b/src/test/java/io/odpf/firehose/sink/prometheus/PromSinkTest.java @@ -6,7 +6,7 @@ import io.odpf.firehose.message.Message; import io.odpf.firehose.exception.DeserializerException; import io.odpf.firehose.exception.NeedToRetry; -import io.odpf.firehose.metrics.Instrumentation; +import 
io.odpf.firehose.metrics.FirehoseInstrumentation; import io.odpf.firehose.sink.prometheus.request.PromRequest; import io.odpf.stencil.client.StencilClient; import org.apache.http.Header; @@ -21,7 +21,7 @@ import org.junit.Test; import org.junit.runner.RunWith; import org.mockito.Mock; -import org.mockito.runners.MockitoJUnitRunner; +import org.mockito.junit.MockitoJUnitRunner; import org.xerial.snappy.Snappy; import java.io.ByteArrayInputStream; @@ -42,7 +42,7 @@ @RunWith(MockitoJUnitRunner.class) public class PromSinkTest { @Mock - private Instrumentation instrumentation; + private FirehoseInstrumentation firehoseInstrumentation; @Mock private PromRequest request; @Mock @@ -92,7 +92,7 @@ public void shouldPrepareRequestDuringPreparationAndCallItDuringExecution() thro when(request.build(messages)).thenReturn(httpPostList); when(httpClient.execute(httpPost)).thenReturn(response); - PromSink promSink = new PromSink(instrumentation, request, httpClient, stencilClient, retryStatusCodeRange, requestLogStatusCodeRanges); + PromSink promSink = new PromSink(firehoseInstrumentation, request, httpClient, stencilClient, retryStatusCodeRange, requestLogStatusCodeRanges); promSink.prepare(messages); promSink.execute(); @@ -108,7 +108,7 @@ public void shouldThrowNeedToRetryExceptionWhenResponseCodeIsGivenRange() throws when(request.build(messages)).thenReturn(httpPostList); when(httpClient.execute(httpPost)).thenReturn(response); - PromSink promSink = new PromSink(instrumentation, request, httpClient, stencilClient, + PromSink promSink = new PromSink(firehoseInstrumentation, request, httpClient, stencilClient, new RangeToHashMapConverter().convert(null, "400-505"), requestLogStatusCodeRanges); promSink.prepare(messages); promSink.execute(); @@ -123,7 +123,7 @@ public void shouldThrowNeedToRetryExceptionWhenResponseCodeIsNull() throws Excep when(request.build(messages)).thenReturn(httpPostList); when(httpClient.execute(httpPost)).thenReturn(null); - PromSink promSink = new PromSink(instrumentation, request, httpClient, stencilClient, retryStatusCodeRange, requestLogStatusCodeRanges); + PromSink promSink = new PromSink(firehoseInstrumentation, request, httpClient, stencilClient, retryStatusCodeRange, requestLogStatusCodeRanges); promSink.prepare(messages); promSink.execute(); } @@ -132,13 +132,13 @@ public void shouldThrowNeedToRetryExceptionWhenResponseCodeIsNull() throws Excep public void shouldCatchURISyntaxExceptionAndThrowIOException() throws URISyntaxException, DeserializerException, IOException { when(request.build(messages)).thenThrow(new URISyntaxException("", "")); - PromSink promSink = new PromSink(instrumentation, request, httpClient, stencilClient, retryStatusCodeRange, requestLogStatusCodeRanges); + PromSink promSink = new PromSink(firehoseInstrumentation, request, httpClient, stencilClient, retryStatusCodeRange, requestLogStatusCodeRanges); promSink.prepare(messages); } @Test public void shouldCloseStencilClient() throws IOException { - PromSink promSink = new PromSink(instrumentation, request, httpClient, stencilClient, retryStatusCodeRange, requestLogStatusCodeRanges); + PromSink promSink = new PromSink(firehoseInstrumentation, request, httpClient, stencilClient, retryStatusCodeRange, requestLogStatusCodeRanges); promSink.close(); verify(stencilClient, times(1)).close(); @@ -146,10 +146,10 @@ public void shouldCloseStencilClient() throws IOException { @Test public void shouldLogConnectionClosing() throws IOException { - PromSink promSink = new PromSink(instrumentation, request, 
httpClient, stencilClient, retryStatusCodeRange, requestLogStatusCodeRanges); + PromSink promSink = new PromSink(firehoseInstrumentation, request, httpClient, stencilClient, retryStatusCodeRange, requestLogStatusCodeRanges); promSink.close(); - verify(instrumentation, times(1)).logInfo("HTTP connection closing"); + verify(firehoseInstrumentation, times(1)).logInfo("HTTP connection closing"); } @Test @@ -165,17 +165,17 @@ public void shouldLogEntireRequestIfInStatusCodeRangeAndCaptureDroppedMessages() when(request.build(messages)).thenReturn(httpPostList); when(httpClient.execute(httpPost)).thenReturn(response); - PromSink promSink = new PromSink(instrumentation, request, httpClient, stencilClient, + PromSink promSink = new PromSink(firehoseInstrumentation, request, httpClient, stencilClient, retryStatusCodeRange, new RangeToHashMapConverter().convert(null, "400-505")); promSink.prepare(messages); promSink.execute(); - verify(instrumentation, times(1)).logInfo( + verify(firehoseInstrumentation, times(1)).logInfo( "\nRequest Method: POST" + "\nRequest Url: http://dummy.com" + "\nRequest Headers: [Accept: text/plain]" + "\nRequest Body: " + body); - verify(instrumentation, times(1)).logInfo("Message dropped because of status code: 500"); - verify(instrumentation, times(1)).captureCount("firehose_sink_messages_drop_total", 1, "cause= 500"); + verify(firehoseInstrumentation, times(1)).logInfo("Message dropped because of status code: 500"); + verify(firehoseInstrumentation, times(1)).captureCount("firehose_sink_messages_drop_total", 1L, "cause= 500"); } @Test @@ -190,11 +190,11 @@ public void shouldNotLogEntireRequestIfNotInStatusCodeRange() throws Exception { when(request.build(messages)).thenReturn(httpPostList); when(httpClient.execute(httpPost)).thenReturn(response); - PromSink promSink = new PromSink(instrumentation, request, httpClient, stencilClient, + PromSink promSink = new PromSink(firehoseInstrumentation, request, httpClient, stencilClient, retryStatusCodeRange, new RangeToHashMapConverter().convert(null, "400-499")); promSink.prepare(messages); promSink.execute(); - verify(instrumentation, times(0)).logInfo( + verify(firehoseInstrumentation, times(0)).logInfo( "\nRequest Method: POST" + "\nRequest Url: http://dummy.com" + "\nRequest Headers: [Accept: text/plain]" @@ -211,12 +211,12 @@ public void shouldCaptureDroppedMessagesMetricsIfNotInStatusCodeRange() throws E when(request.build(messages)).thenReturn(httpPostList); when(httpClient.execute(httpPost)).thenReturn(response); - PromSink promSink = new PromSink(instrumentation, request, httpClient, stencilClient, + PromSink promSink = new PromSink(firehoseInstrumentation, request, httpClient, stencilClient, new RangeToHashMapConverter().convert(null, "400-499"), requestLogStatusCodeRanges); promSink.prepare(messages); promSink.execute(); - verify(instrumentation, times(1)).logInfo("Message dropped because of status code: 500"); - verify(instrumentation, times(1)).captureCount("firehose_sink_messages_drop_total", 1, "cause= 500"); + verify(firehoseInstrumentation, times(1)).logInfo("Message dropped because of status code: 500"); + verify(firehoseInstrumentation, times(1)).captureCount("firehose_sink_messages_drop_total", 1L, "cause= 500"); } @Test(expected = NeedToRetry.class) @@ -227,11 +227,11 @@ public void shouldNotCaptureDroppedMessagesMetricsIfInStatusCodeRange() throws E when(request.build(messages)).thenReturn(httpPostList); when(httpClient.execute(httpPost)).thenReturn(response); - PromSink promSink = new 
PromSink(instrumentation, request, httpClient, stencilClient, + PromSink promSink = new PromSink(firehoseInstrumentation, request, httpClient, stencilClient, new RangeToHashMapConverter().convert(null, "400-600"), requestLogStatusCodeRanges); promSink.prepare(messages); promSink.execute(); - verify(instrumentation, times(0)).logInfo("Message dropped because of status code: 500"); + verify(firehoseInstrumentation, times(0)).logInfo("Message dropped because of status code: 500"); } @Test @@ -242,12 +242,12 @@ public void shouldNotCaptureDroppedMessagesMetricsIfStatusCodeIs200() throws Exc when(request.build(messages)).thenReturn(httpPostList); when(httpClient.execute(httpPost)).thenReturn(response); - PromSink promSink = new PromSink(instrumentation, request, httpClient, stencilClient, + PromSink promSink = new PromSink(firehoseInstrumentation, request, httpClient, stencilClient, retryStatusCodeRange, requestLogStatusCodeRanges); promSink.prepare(messages); promSink.execute(); - verify(instrumentation, times(0)).logInfo("Message dropped because of status code: 200"); - verify(instrumentation, times(0)).captureCount("firehose_sink_messages_drop_total", 1, "200"); + verify(firehoseInstrumentation, times(0)).logInfo("Message dropped because of status code: 200"); + verify(firehoseInstrumentation, times(0)).captureCount("firehose_sink_messages_drop_total", 1L, "200"); } @Test @@ -258,12 +258,12 @@ public void shouldNotCaptureDroppedMessagesMetricsIfStatusCodeIs201() throws Exc when(request.build(messages)).thenReturn(httpPostList); when(httpClient.execute(httpPost)).thenReturn(response); - PromSink promSink = new PromSink(instrumentation, request, httpClient, stencilClient, + PromSink promSink = new PromSink(firehoseInstrumentation, request, httpClient, stencilClient, retryStatusCodeRange, requestLogStatusCodeRanges); promSink.prepare(messages); promSink.execute(); - verify(instrumentation, times(0)).logInfo("Message dropped because of status code: 201"); - verify(instrumentation, times(0)).captureCount("firehose_sink_messages_drop_total", 1, "201"); + verify(firehoseInstrumentation, times(0)).logInfo("Message dropped because of status code: 201"); + verify(firehoseInstrumentation, times(0)).captureCount("firehose_sink_messages_drop_total", 1L, "201"); } @Test @@ -275,12 +275,12 @@ public void shouldCaptureResponseStatusCount() throws Exception { when(request.build(messages)).thenReturn(httpPostList); when(httpClient.execute(httpPost)).thenReturn(response); - PromSink promSink = new PromSink(instrumentation, request, httpClient, stencilClient, + PromSink promSink = new PromSink(firehoseInstrumentation, request, httpClient, stencilClient, retryStatusCodeRange, requestLogStatusCodeRanges); promSink.prepare(messages); promSink.execute(); - verify(instrumentation, times(1)).captureCount("firehose_sink_http_response_code_total", 1, "status_code=" + statusLine.getStatusCode()); + verify(firehoseInstrumentation, times(1)).captureCount("firehose_sink_http_response_code_total", 1L, "status_code=" + statusLine.getStatusCode()); } @Test @@ -290,7 +290,7 @@ public void shouldReadSnappyCompressedContent() throws Exception { when(httpPost.getEntity()).thenReturn(httpEntity); when(httpEntity.getContent()).thenReturn(inputStream); - PromSink promSink = new PromSink(instrumentation, request, httpClient, stencilClient, + PromSink promSink = new PromSink(firehoseInstrumentation, request, httpClient, stencilClient, retryStatusCodeRange, requestLogStatusCodeRanges); List requestBody = promSink.readContent(httpPost); 
diff --git a/src/test/java/io/odpf/firehose/sink/prometheus/request/PromRequestCreatorTest.java b/src/test/java/io/odpf/firehose/sink/prometheus/request/PromRequestCreatorTest.java
index 0bf681f8e..d62752a72 100644
--- a/src/test/java/io/odpf/firehose/sink/prometheus/request/PromRequestCreatorTest.java
+++ b/src/test/java/io/odpf/firehose/sink/prometheus/request/PromRequestCreatorTest.java
@@ -1,8 +1,8 @@
package io.odpf.firehose.sink.prometheus.request;
+import io.odpf.depot.metrics.StatsDReporter;
import io.odpf.firehose.config.PromSinkConfig;
-import io.odpf.firehose.metrics.StatsDReporter;
import io.odpf.stencil.Parser;
import org.aeonbits.owner.ConfigFactory;
import org.junit.Before;
diff --git a/src/test/java/io/odpf/firehose/sink/prometheus/request/PromRequestTest.java b/src/test/java/io/odpf/firehose/sink/prometheus/request/PromRequestTest.java
index 75d98c172..ceef64e79 100644
--- a/src/test/java/io/odpf/firehose/sink/prometheus/request/PromRequestTest.java
+++ b/src/test/java/io/odpf/firehose/sink/prometheus/request/PromRequestTest.java
@@ -3,7 +3,7 @@
import com.google.protobuf.DynamicMessage;
import cortexpb.Cortex;
import io.odpf.firehose.message.Message;
-import io.odpf.firehose.metrics.Instrumentation;
+import io.odpf.firehose.metrics.FirehoseInstrumentation;
import io.odpf.firehose.sink.prometheus.builder.HeaderBuilder;
import io.odpf.firehose.sink.prometheus.builder.RequestEntityBuilder;
import io.odpf.firehose.sink.prometheus.builder.WriteRequestBuilder;
@@ -35,7 +35,7 @@ public class PromRequestTest {
@Mock
- private Instrumentation instrumentation;
+ private FirehoseInstrumentation firehoseInstrumentation;
@Mock
private HeaderBuilder headerBuilder;
@@ -81,7 +81,7 @@ public void shouldProperlyBuildMessages() throws URISyntaxException, IOException
when(writeRequestBuilder.buildWriteRequest(messages)).thenReturn(writeRequestBody);
when(requestEntityBuilder.buildHttpEntity(writeRequestBody)).thenReturn(new ByteArrayEntity(compressedBody));
- PromRequest promRequest = new PromRequest(instrumentation, headerBuilder, url, requestEntityBuilder, writeRequestBuilder);
+ PromRequest promRequest = new PromRequest(firehoseInstrumentation, headerBuilder, url, requestEntityBuilder, writeRequestBuilder);
HttpEntityEnclosingRequestBase request = promRequest.build(messages).get(0);
BasicHeader header1 = new BasicHeader(CONTENT_ENCODING, CONTENT_ENCODING_DEFAULT);
diff --git a/src/test/java/io/odpf/firehose/sink/redis/RedisSinkFactoryTest.java b/src/test/java/io/odpf/firehose/sink/redis/RedisSinkFactoryTest.java
index 09bcd4e91..a42bc7a14 100644
--- a/src/test/java/io/odpf/firehose/sink/redis/RedisSinkFactoryTest.java
+++ b/src/test/java/io/odpf/firehose/sink/redis/RedisSinkFactoryTest.java
@@ -1,7 +1,7 @@
package io.odpf.firehose.sink.redis;
-import io.odpf.firehose.metrics.StatsDReporter;
+import io.odpf.depot.metrics.StatsDReporter;
import io.odpf.firehose.sink.AbstractSink;
import io.odpf.stencil.client.StencilClient;
import org.junit.Before;
diff --git a/src/test/java/io/odpf/firehose/sink/redis/RedisSinkTest.java b/src/test/java/io/odpf/firehose/sink/redis/RedisSinkTest.java
index 112782471..6b22704a7 100644
--- a/src/test/java/io/odpf/firehose/sink/redis/RedisSinkTest.java
+++ b/src/test/java/io/odpf/firehose/sink/redis/RedisSinkTest.java
@@ -1,7 +1,7 @@
package io.odpf.firehose.sink.redis;
import io.odpf.firehose.message.Message;
-import io.odpf.firehose.metrics.Instrumentation;
+import
io.odpf.firehose.sink.redis.client.RedisClient; import io.odpf.firehose.sink.redis.exception.NoResponseException; import org.junit.Before; @@ -9,7 +9,7 @@ import org.junit.runner.RunWith; import org.mockito.InOrder; import org.mockito.Mock; -import org.mockito.runners.MockitoJUnitRunner; +import org.mockito.junit.MockitoJUnitRunner; import java.time.Instant; import java.util.ArrayList; @@ -20,13 +20,13 @@ public class RedisSinkTest { @Mock private RedisClient redisClient; @Mock - private Instrumentation instrumentation; + private FirehoseInstrumentation firehoseInstrumentation; private RedisSink redis; @Before public void setup() { - when(instrumentation.startExecution()).thenReturn(Instant.now()); - redis = new RedisSink(instrumentation, "redis", redisClient); + when(firehoseInstrumentation.startExecution()).thenReturn(Instant.now()); + redis = new RedisSink(firehoseInstrumentation, "redis", redisClient); } @Test @@ -56,7 +56,7 @@ public void shouldInvokeCloseOnTheClient() { public void shouldLogWhenClosingConnection() { redis.close(); - verify(instrumentation, times(1)).logInfo("Redis connection closing"); + verify(firehoseInstrumentation, times(1)).logInfo("Redis connection closing"); } @Test @@ -65,15 +65,15 @@ public void sendsMetricsForSuccessMessages() { redis.pushMessage(messages); - verify(instrumentation, times(1)).capturePreExecutionLatencies(messages); - verify(instrumentation, times(1)).startExecution(); - verify(instrumentation, times(1)).logInfo("Preparing {} messages", messages.size()); - verify(instrumentation, times(1)).captureSinkExecutionTelemetry(any(), any()); - InOrder inOrder = inOrder(instrumentation); - inOrder.verify(instrumentation).logInfo("Preparing {} messages", messages.size()); - inOrder.verify(instrumentation).capturePreExecutionLatencies(messages); - inOrder.verify(instrumentation).startExecution(); - inOrder.verify(instrumentation).captureSinkExecutionTelemetry(any(), any()); + verify(firehoseInstrumentation, times(1)).capturePreExecutionLatencies(messages); + verify(firehoseInstrumentation, times(1)).startExecution(); + verify(firehoseInstrumentation, times(1)).logInfo("Preparing {} messages", messages.size()); + verify(firehoseInstrumentation, times(1)).captureSinkExecutionTelemetry(any(), any()); + InOrder inOrder = inOrder(firehoseInstrumentation); + inOrder.verify(firehoseInstrumentation).logInfo("Preparing {} messages", messages.size()); + inOrder.verify(firehoseInstrumentation).capturePreExecutionLatencies(messages); + inOrder.verify(firehoseInstrumentation).startExecution(); + inOrder.verify(firehoseInstrumentation).captureSinkExecutionTelemetry(any(), any()); } @Test @@ -83,15 +83,15 @@ public void sendsMetricsForFailedMessages() { redis.pushMessage(messages); - verify(instrumentation, times(1)).capturePreExecutionLatencies(messages); - verify(instrumentation, times(1)).startExecution(); - verify(instrumentation, times(1)).logInfo("Preparing {} messages", messages.size()); - verify(instrumentation, times(1)).captureSinkExecutionTelemetry(any(), any()); - InOrder inOrder = inOrder(instrumentation); - inOrder.verify(instrumentation).logInfo("Preparing {} messages", messages.size()); - inOrder.verify(instrumentation).capturePreExecutionLatencies(messages); - inOrder.verify(instrumentation).startExecution(); - inOrder.verify(instrumentation).captureSinkExecutionTelemetry(any(), any()); + verify(firehoseInstrumentation, times(1)).capturePreExecutionLatencies(messages); + verify(firehoseInstrumentation, times(1)).startExecution(); + 
verify(firehoseInstrumentation, times(1)).logInfo("Preparing {} messages", messages.size()); + verify(firehoseInstrumentation, times(1)).captureSinkExecutionTelemetry(any(), any()); + InOrder inOrder = inOrder(firehoseInstrumentation); + inOrder.verify(firehoseInstrumentation).logInfo("Preparing {} messages", messages.size()); + inOrder.verify(firehoseInstrumentation).capturePreExecutionLatencies(messages); + inOrder.verify(firehoseInstrumentation).startExecution(); + inOrder.verify(firehoseInstrumentation).captureSinkExecutionTelemetry(any(), any()); } diff --git a/src/test/java/io/odpf/firehose/sink/redis/client/RedisClientFactoryTest.java b/src/test/java/io/odpf/firehose/sink/redis/client/RedisClientFactoryTest.java index 0bdcbcd45..8ba8c2ad2 100644 --- a/src/test/java/io/odpf/firehose/sink/redis/client/RedisClientFactoryTest.java +++ b/src/test/java/io/odpf/firehose/sink/redis/client/RedisClientFactoryTest.java @@ -1,12 +1,12 @@ package io.odpf.firehose.sink.redis.client; +import io.odpf.depot.metrics.StatsDReporter; import io.odpf.firehose.config.RedisSinkConfig; import io.odpf.firehose.config.enums.RedisSinkDeploymentType; import io.odpf.firehose.config.enums.RedisSinkDataType; import io.odpf.firehose.config.enums.RedisSinkTtlType; import io.odpf.firehose.exception.ConfigurationException; -import io.odpf.firehose.metrics.StatsDReporter; import io.odpf.stencil.client.StencilClient; import org.junit.Assert; import org.junit.Rule; @@ -14,7 +14,7 @@ import org.junit.rules.ExpectedException; import org.junit.runner.RunWith; import org.mockito.Mock; -import org.mockito.runners.MockitoJUnitRunner; +import org.mockito.junit.MockitoJUnitRunner; import static org.mockito.Mockito.when; diff --git a/src/test/java/io/odpf/firehose/sink/redis/client/RedisClusterClientTest.java b/src/test/java/io/odpf/firehose/sink/redis/client/RedisClusterClientTest.java index ab791894e..75ea4e909 100644 --- a/src/test/java/io/odpf/firehose/sink/redis/client/RedisClusterClientTest.java +++ b/src/test/java/io/odpf/firehose/sink/redis/client/RedisClusterClientTest.java @@ -1,8 +1,8 @@ package io.odpf.firehose.sink.redis.client; +import io.odpf.depot.metrics.StatsDReporter; import io.odpf.firehose.message.Message; -import io.odpf.firehose.metrics.Instrumentation; -import io.odpf.firehose.metrics.StatsDReporter; +import io.odpf.firehose.metrics.FirehoseInstrumentation; import io.odpf.firehose.sink.redis.dataentry.RedisDataEntry; import io.odpf.firehose.sink.redis.dataentry.RedisHashSetFieldEntry; import io.odpf.firehose.sink.redis.dataentry.RedisListEntry; @@ -14,7 +14,7 @@ import org.junit.runner.RunWith; import org.mockito.Mock; import org.mockito.MockitoAnnotations; -import org.mockito.runners.MockitoJUnitRunner; +import org.mockito.junit.MockitoJUnitRunner; import redis.clients.jedis.JedisCluster; import java.util.ArrayList; @@ -29,12 +29,12 @@ public class RedisClusterClientTest { private StatsDReporter statsDReporter; @Mock - private Instrumentation instrumentation; + private FirehoseInstrumentation firehoseInstrumentation; - private final RedisHashSetFieldEntry firstRedisSetEntry = new RedisHashSetFieldEntry("key1", "field1", "value1", new Instrumentation(statsDReporter, RedisHashSetFieldEntry.class)); - private final RedisHashSetFieldEntry secondRedisSetEntry = new RedisHashSetFieldEntry("key2", "field2", "value2", new Instrumentation(statsDReporter, RedisHashSetFieldEntry.class)); - private final RedisListEntry firstRedisListEntry = new RedisListEntry("key1", "value1", new Instrumentation(statsDReporter, 
RedisListEntry.class)); - private final RedisListEntry secondRedisListEntry = new RedisListEntry("key2", "value2", new Instrumentation(statsDReporter, RedisListEntry.class)); + private final RedisHashSetFieldEntry firstRedisSetEntry = new RedisHashSetFieldEntry("key1", "field1", "value1", new FirehoseInstrumentation(statsDReporter, RedisHashSetFieldEntry.class)); + private final RedisHashSetFieldEntry secondRedisSetEntry = new RedisHashSetFieldEntry("key2", "field2", "value2", new FirehoseInstrumentation(statsDReporter, RedisHashSetFieldEntry.class)); + private final RedisListEntry firstRedisListEntry = new RedisListEntry("key1", "value1", new FirehoseInstrumentation(statsDReporter, RedisListEntry.class)); + private final RedisListEntry secondRedisListEntry = new RedisListEntry("key2", "value2", new FirehoseInstrumentation(statsDReporter, RedisListEntry.class)); @Mock private JedisCluster jedisCluster; @@ -53,7 +53,7 @@ public void setup() { messages = Arrays.asList(new Message(new byte[0], new byte[0], "topic", 0, 100), new Message(new byte[0], new byte[0], "topic", 0, 100)); - redisClusterClient = new RedisClusterClient(instrumentation, redisParser, redisTTL, jedisCluster); + redisClusterClient = new RedisClusterClient(firehoseInstrumentation, redisParser, redisTTL, jedisCluster); redisDataEntries = new ArrayList<>(); @@ -103,7 +103,7 @@ public void shouldReturnEmptyArrayAfterExecuting() { public void shouldCloseTheJedisClient() { redisClusterClient.close(); - verify(instrumentation, times(1)).logInfo("Closing Jedis client"); + verify(firehoseInstrumentation, times(1)).logInfo("Closing Jedis client"); verify(jedisCluster).close(); } diff --git a/src/test/java/io/odpf/firehose/sink/redis/client/RedisStandaloneClientTest.java b/src/test/java/io/odpf/firehose/sink/redis/client/RedisStandaloneClientTest.java index cb340ab37..8bdc67c78 100644 --- a/src/test/java/io/odpf/firehose/sink/redis/client/RedisStandaloneClientTest.java +++ b/src/test/java/io/odpf/firehose/sink/redis/client/RedisStandaloneClientTest.java @@ -1,9 +1,9 @@ package io.odpf.firehose.sink.redis.client; +import io.odpf.depot.metrics.StatsDReporter; import io.odpf.firehose.message.Message; import io.odpf.firehose.exception.DeserializerException; -import io.odpf.firehose.metrics.Instrumentation; -import io.odpf.firehose.metrics.StatsDReporter; +import io.odpf.firehose.metrics.FirehoseInstrumentation; import io.odpf.firehose.sink.redis.dataentry.RedisDataEntry; import io.odpf.firehose.sink.redis.dataentry.RedisHashSetFieldEntry; import io.odpf.firehose.sink.redis.dataentry.RedisListEntry; @@ -18,7 +18,7 @@ import org.junit.runner.RunWith; import org.mockito.Mock; import org.mockito.MockitoAnnotations; -import org.mockito.runners.MockitoJUnitRunner; +import org.mockito.junit.MockitoJUnitRunner; import redis.clients.jedis.Jedis; import redis.clients.jedis.Pipeline; import redis.clients.jedis.Response; @@ -35,12 +35,12 @@ public class RedisStandaloneClientTest { @Mock private StatsDReporter statsDReporter; @Mock - private Instrumentation instrumentation; + private FirehoseInstrumentation firehoseInstrumentation; - private final RedisHashSetFieldEntry firstRedisSetEntry = new RedisHashSetFieldEntry("key1", "field1", "value1", new Instrumentation(statsDReporter, RedisHashSetFieldEntry.class)); - private final RedisHashSetFieldEntry secondRedisSetEntry = new RedisHashSetFieldEntry("key2", "field2", "value2", new Instrumentation(statsDReporter, RedisHashSetFieldEntry.class)); - private final RedisListEntry firstRedisListEntry = new 
RedisListEntry("key1", "value1", new Instrumentation(statsDReporter, RedisListEntry.class)); - private final RedisListEntry secondRedisListEntry = new RedisListEntry("key2", "value2", new Instrumentation(statsDReporter, RedisListEntry.class)); + private final RedisHashSetFieldEntry firstRedisSetEntry = new RedisHashSetFieldEntry("key1", "field1", "value1", new FirehoseInstrumentation(statsDReporter, RedisHashSetFieldEntry.class)); + private final RedisHashSetFieldEntry secondRedisSetEntry = new RedisHashSetFieldEntry("key2", "field2", "value2", new FirehoseInstrumentation(statsDReporter, RedisHashSetFieldEntry.class)); + private final RedisListEntry firstRedisListEntry = new RedisListEntry("key1", "value1", new FirehoseInstrumentation(statsDReporter, RedisListEntry.class)); + private final RedisListEntry secondRedisListEntry = new RedisListEntry("key2", "value2", new FirehoseInstrumentation(statsDReporter, RedisListEntry.class)); @Rule public ExpectedException expectedException = ExpectedException.none(); private RedisClient redisClient; @@ -68,7 +68,7 @@ public void setUp() { messages = Arrays.asList(new Message(new byte[0], new byte[0], "topic", 0, 100), new Message(new byte[0], new byte[0], "topic", 0, 100)); - redisClient = new RedisStandaloneClient(instrumentation, redisMessageParser, redisTTL, jedis); + redisClient = new RedisStandaloneClient(firehoseInstrumentation, redisMessageParser, redisTTL, jedis); redisDataEntries = new ArrayList<>(); @@ -128,7 +128,7 @@ public void shouldCompleteTransactionInExec() { redisClient.execute(); verify(jedisPipeline).exec(); - verify(instrumentation, times(1)).logDebug("jedis responses: {}", responses); + verify(firehoseInstrumentation, times(1)).logDebug("jedis responses: {}", responses); } @Test @@ -183,7 +183,7 @@ public void shouldReturnEmptyArrayInExec() { public void shouldCloseTheClient() { redisClient.close(); - verify(instrumentation, times(1)).logInfo("Closing Jedis client"); + verify(firehoseInstrumentation, times(1)).logInfo("Closing Jedis client"); verify(jedis, times(1)).close(); } diff --git a/src/test/java/io/odpf/firehose/sink/redis/dataentry/RedisHashSetFieldEntryTest.java b/src/test/java/io/odpf/firehose/sink/redis/dataentry/RedisHashSetFieldEntryTest.java index 048bcf65a..c2fb58ad0 100644 --- a/src/test/java/io/odpf/firehose/sink/redis/dataentry/RedisHashSetFieldEntryTest.java +++ b/src/test/java/io/odpf/firehose/sink/redis/dataentry/RedisHashSetFieldEntryTest.java @@ -1,6 +1,6 @@ package io.odpf.firehose.sink.redis.dataentry; -import io.odpf.firehose.metrics.Instrumentation; +import io.odpf.firehose.metrics.FirehoseInstrumentation; import io.odpf.firehose.sink.redis.ttl.DurationTtl; import io.odpf.firehose.sink.redis.ttl.ExactTimeTtl; import io.odpf.firehose.sink.redis.ttl.NoRedisTtl; @@ -12,18 +12,16 @@ import org.mockito.Mock; import org.mockito.Mockito; import org.mockito.MockitoAnnotations; -import org.mockito.runners.MockitoJUnitRunner; +import org.mockito.junit.MockitoJUnitRunner; import redis.clients.jedis.JedisCluster; import redis.clients.jedis.Pipeline; -import static org.mockito.ArgumentMatchers.any; -import static org.mockito.Mockito.times; -import static org.mockito.Mockito.verify; +import static org.mockito.Mockito.*; @RunWith(MockitoJUnitRunner.class) public class RedisHashSetFieldEntryTest { @Mock - private Instrumentation instrumentation; + private FirehoseInstrumentation firehoseInstrumentation; @Mock private Pipeline pipeline; @@ -40,7 +38,7 @@ public class RedisHashSetFieldEntryTest { public void setup() { 
MockitoAnnotations.initMocks(this); redisTTL = new NoRedisTtl(); - redisHashSetFieldEntry = new RedisHashSetFieldEntry("test-key", "test-field", "test-value", instrumentation); + redisHashSetFieldEntry = new RedisHashSetFieldEntry("test-key", "test-field", "test-value", firehoseInstrumentation); inOrderPipeline = Mockito.inOrder(pipeline); inOrderJedis = Mockito.inOrder(jedisCluster); } @@ -52,7 +50,7 @@ public void shouldIOnlyPushDataWithoutTTLByDefaultForPipeline() { verify(pipeline, times(1)).hset("test-key", "test-field", "test-value"); verify(pipeline, times(0)).expireAt(any(String.class), any(Long.class)); verify(pipeline, times(0)).expireAt(any(String.class), any(Long.class)); - verify(instrumentation, times(1)).logDebug("key: {}, field: {}, value: {}", "test-key", "test-field", "test-value"); + verify(firehoseInstrumentation, times(1)).logDebug("key: {}, field: {}, value: {}", "test-key", "test-field", "test-value"); } @Test @@ -62,7 +60,7 @@ public void shouldSetProperTTLForExactTimeForPipeline() { inOrderPipeline.verify(pipeline, times(1)).hset("test-key", "test-field", "test-value"); inOrderPipeline.verify(pipeline, times(1)).expireAt("test-key", 1000L); - verify(instrumentation, times(1)).logDebug("key: {}, field: {}, value: {}", "test-key", "test-field", "test-value"); + verify(firehoseInstrumentation, times(1)).logDebug("key: {}, field: {}, value: {}", "test-key", "test-field", "test-value"); } @Test @@ -72,7 +70,7 @@ public void shouldSetProperTTLForDurationForPipeline() { inOrderPipeline.verify(pipeline, times(1)).hset("test-key", "test-field", "test-value"); inOrderPipeline.verify(pipeline, times(1)).expire("test-key", 1000); - verify(instrumentation, times(1)).logDebug("key: {}, field: {}, value: {}", "test-key", "test-field", "test-value"); + verify(firehoseInstrumentation, times(1)).logDebug("key: {}, field: {}, value: {}", "test-key", "test-field", "test-value"); } @Test @@ -82,7 +80,7 @@ public void shouldIOnlyPushDataWithoutTTLByDefaultForCluster() { verify(jedisCluster, times(1)).hset("test-key", "test-field", "test-value"); verify(jedisCluster, times(0)).expireAt(any(String.class), any(Long.class)); verify(jedisCluster, times(0)).expireAt(any(String.class), any(Long.class)); - verify(instrumentation, times(1)).logDebug("key: {}, field: {}, value: {}", "test-key", "test-field", "test-value"); + verify(firehoseInstrumentation, times(1)).logDebug("key: {}, field: {}, value: {}", "test-key", "test-field", "test-value"); } @Test @@ -92,7 +90,7 @@ public void shouldSetProperTTLForExactTimeForCluster() { inOrderJedis.verify(jedisCluster, times(1)).hset("test-key", "test-field", "test-value"); inOrderJedis.verify(jedisCluster, times(1)).expireAt("test-key", 1000L); - verify(instrumentation, times(1)).logDebug("key: {}, field: {}, value: {}", "test-key", "test-field", "test-value"); + verify(firehoseInstrumentation, times(1)).logDebug("key: {}, field: {}, value: {}", "test-key", "test-field", "test-value"); } @Test @@ -102,6 +100,6 @@ public void shouldSetProperTTLForDuration() { inOrderJedis.verify(jedisCluster, times(1)).hset("test-key", "test-field", "test-value"); inOrderJedis.verify(jedisCluster, times(1)).expire("test-key", 1000); - verify(instrumentation, times(1)).logDebug("key: {}, field: {}, value: {}", "test-key", "test-field", "test-value"); + verify(firehoseInstrumentation, times(1)).logDebug("key: {}, field: {}, value: {}", "test-key", "test-field", "test-value"); } } diff --git a/src/test/java/io/odpf/firehose/sink/redis/dataentry/RedisKeyValueEntryTest.java 
b/src/test/java/io/odpf/firehose/sink/redis/dataentry/RedisKeyValueEntryTest.java index 5397d6b9b..31f0f2a46 100644 --- a/src/test/java/io/odpf/firehose/sink/redis/dataentry/RedisKeyValueEntryTest.java +++ b/src/test/java/io/odpf/firehose/sink/redis/dataentry/RedisKeyValueEntryTest.java @@ -1,6 +1,6 @@ package io.odpf.firehose.sink.redis.dataentry; -import io.odpf.firehose.metrics.Instrumentation; +import io.odpf.firehose.metrics.FirehoseInstrumentation; import io.odpf.firehose.sink.redis.ttl.DurationTtl; import io.odpf.firehose.sink.redis.ttl.NoRedisTtl; import org.junit.Before; @@ -12,13 +12,11 @@ import redis.clients.jedis.JedisCluster; import redis.clients.jedis.Pipeline; -import static org.mockito.ArgumentMatchers.any; -import static org.mockito.Mockito.times; -import static org.mockito.Mockito.verify; +import static org.mockito.Mockito.*; public class RedisKeyValueEntryTest { @Mock - private Instrumentation instrumentation; + private FirehoseInstrumentation firehoseInstrumentation; @Mock private Pipeline pipeline; @@ -42,7 +40,7 @@ public void setup() { public void pushMessageWithNoTtl() { String key = "key"; String value = "value"; - RedisKeyValueEntry redisKeyValueEntry = new RedisKeyValueEntry(key, value, instrumentation); + RedisKeyValueEntry redisKeyValueEntry = new RedisKeyValueEntry(key, value, firehoseInstrumentation); redisKeyValueEntry.pushMessage(pipeline, new NoRedisTtl()); inOrderPipeline.verify(pipeline, times(1)).set(key, value); inOrderPipeline.verify(pipeline, times(0)).expireAt(any(String.class), any(Long.class)); @@ -53,7 +51,7 @@ public void pushMessageWithNoTtl() { public void pushMessageWithTtl() { String key = "key"; String value = "value"; - RedisKeyValueEntry redisKeyValueEntry = new RedisKeyValueEntry(key, value, instrumentation); + RedisKeyValueEntry redisKeyValueEntry = new RedisKeyValueEntry(key, value, firehoseInstrumentation); redisKeyValueEntry.pushMessage(pipeline, new DurationTtl(100)); inOrderPipeline.verify(pipeline, times(1)).set(key, value); inOrderPipeline.verify(pipeline, times(1)).expire(key, 100); @@ -63,9 +61,9 @@ public void pushMessageWithTtl() { public void pushMessageVerifyInstrumentation() { String key = "this-key"; String value = "john"; - RedisKeyValueEntry redisKeyValueEntry = new RedisKeyValueEntry(key, value, instrumentation); + RedisKeyValueEntry redisKeyValueEntry = new RedisKeyValueEntry(key, value, firehoseInstrumentation); redisKeyValueEntry.pushMessage(pipeline, new DurationTtl(100)); - verify(instrumentation, times(1)).logDebug("key: {}, value: {}", key, value); + verify(firehoseInstrumentation, times(1)).logDebug("key: {}, value: {}", key, value); } @@ -73,7 +71,7 @@ public void pushMessageVerifyInstrumentation() { public void pushMessageWithNoTtlUsingJedisCluster() { String key = "key"; String value = "value"; - RedisKeyValueEntry redisKeyValueEntry = new RedisKeyValueEntry(key, value, instrumentation); + RedisKeyValueEntry redisKeyValueEntry = new RedisKeyValueEntry(key, value, firehoseInstrumentation); redisKeyValueEntry.pushMessage(jedisCluster, new NoRedisTtl()); inOrderJedis.verify(jedisCluster, times(1)).set(key, value); inOrderJedis.verify(jedisCluster, times(0)).expireAt(any(String.class), any(Long.class)); @@ -84,7 +82,7 @@ public void pushMessageWithNoTtlUsingJedisCluster() { public void pushMessageWithTtlUsingJedisCluster() { String key = "key"; String value = "value"; - RedisKeyValueEntry redisKeyValueEntry = new RedisKeyValueEntry(key, value, instrumentation); + RedisKeyValueEntry redisKeyValueEntry = new 
RedisKeyValueEntry(key, value, firehoseInstrumentation); redisKeyValueEntry.pushMessage(jedisCluster, new DurationTtl(100)); inOrderJedis.verify(jedisCluster, times(1)).set(key, value); inOrderJedis.verify(jedisCluster, times(1)).expire(key, 100); @@ -94,9 +92,9 @@ public void pushMessageWithTtlUsingJedisCluster() { public void pushMessageVerifyInstrumentationUsingJedisCluster() { String key = "this-key"; String value = "john"; - RedisKeyValueEntry redisKeyValueEntry = new RedisKeyValueEntry(key, value, instrumentation); + RedisKeyValueEntry redisKeyValueEntry = new RedisKeyValueEntry(key, value, firehoseInstrumentation); redisKeyValueEntry.pushMessage(jedisCluster, new DurationTtl(100)); - verify(instrumentation, times(1)).logDebug("key: {}, value: {}", key, value); + verify(firehoseInstrumentation, times(1)).logDebug("key: {}, value: {}", key, value); } } diff --git a/src/test/java/io/odpf/firehose/sink/redis/dataentry/RedisListEntryTest.java b/src/test/java/io/odpf/firehose/sink/redis/dataentry/RedisListEntryTest.java index 687a9d418..d4583f633 100644 --- a/src/test/java/io/odpf/firehose/sink/redis/dataentry/RedisListEntryTest.java +++ b/src/test/java/io/odpf/firehose/sink/redis/dataentry/RedisListEntryTest.java @@ -1,6 +1,6 @@ package io.odpf.firehose.sink.redis.dataentry; -import io.odpf.firehose.metrics.Instrumentation; +import io.odpf.firehose.metrics.FirehoseInstrumentation; import io.odpf.firehose.sink.redis.ttl.DurationTtl; import io.odpf.firehose.sink.redis.ttl.ExactTimeTtl; import io.odpf.firehose.sink.redis.ttl.NoRedisTtl; @@ -9,19 +9,17 @@ import org.junit.Test; import org.junit.runner.RunWith; import org.mockito.Mock; -import org.mockito.runners.MockitoJUnitRunner; +import org.mockito.junit.MockitoJUnitRunner; import redis.clients.jedis.JedisCluster; import redis.clients.jedis.Pipeline; -import static org.mockito.ArgumentMatchers.any; -import static org.mockito.Mockito.times; -import static org.mockito.Mockito.verify; +import static org.mockito.Mockito.*; @RunWith(MockitoJUnitRunner.class) public class RedisListEntryTest { @Mock - private Instrumentation instrumentation; + private FirehoseInstrumentation firehoseInstrumentation; @Mock private Pipeline pipeline; @@ -35,7 +33,7 @@ public class RedisListEntryTest { @Before public void setup() { redisTTL = new NoRedisTtl(); - redisListEntry = new RedisListEntry("test-key", "test-value", instrumentation); + redisListEntry = new RedisListEntry("test-key", "test-value", firehoseInstrumentation); } @Test @@ -45,7 +43,7 @@ public void shouldIOnlyPushDataWithoutTTLByDefaultForPipeline() { verify(pipeline, times(1)).lpush("test-key", "test-value"); verify(pipeline, times(0)).expireAt(any(String.class), any(Long.class)); verify(pipeline, times(0)).expireAt(any(String.class), any(Long.class)); - verify(instrumentation, times(1)).logDebug("key: {}, value: {}", "test-key", "test-value"); + verify(firehoseInstrumentation, times(1)).logDebug("key: {}, value: {}", "test-key", "test-value"); } @Test @@ -54,7 +52,7 @@ public void shouldSetProperTTLForExactTimeForPipeline() { redisListEntry.pushMessage(pipeline, redisTTL); verify(pipeline, times(1)).expireAt("test-key", 1000L); - verify(instrumentation, times(1)).logDebug("key: {}, value: {}", "test-key", "test-value"); + verify(firehoseInstrumentation, times(1)).logDebug("key: {}, value: {}", "test-key", "test-value"); } @Test @@ -63,7 +61,7 @@ public void shouldSetProperTTLForDurationForPipeline() { redisListEntry.pushMessage(pipeline, redisTTL); verify(pipeline, times(1)).expire("test-key", 
1000); - verify(instrumentation, times(1)).logDebug("key: {}, value: {}", "test-key", "test-value"); + verify(firehoseInstrumentation, times(1)).logDebug("key: {}, value: {}", "test-key", "test-value"); } @Test @@ -73,7 +71,7 @@ public void shouldIOnlyPushDataWithoutTTLByDefaultForCluster() { verify(jedisCluster, times(1)).lpush("test-key", "test-value"); verify(jedisCluster, times(0)).expireAt(any(String.class), any(Long.class)); verify(jedisCluster, times(0)).expireAt(any(String.class), any(Long.class)); - verify(instrumentation, times(1)).logDebug("key: {}, value: {}", "test-key", "test-value"); + verify(firehoseInstrumentation, times(1)).logDebug("key: {}, value: {}", "test-key", "test-value"); } @Test @@ -82,7 +80,7 @@ public void shouldSetProperTTLForExactTimeForCluster() { redisListEntry.pushMessage(jedisCluster, redisTTL); verify(jedisCluster, times(1)).expireAt("test-key", 1000L); - verify(instrumentation, times(1)).logDebug("key: {}, value: {}", "test-key", "test-value"); + verify(firehoseInstrumentation, times(1)).logDebug("key: {}, value: {}", "test-key", "test-value"); } @Test @@ -91,6 +89,6 @@ public void shouldSetProperTTLForDurationForCluster() { redisListEntry.pushMessage(jedisCluster, redisTTL); verify(jedisCluster, times(1)).expire("test-key", 1000); - verify(instrumentation, times(1)).logDebug("key: {}, value: {}", "test-key", "test-value"); + verify(firehoseInstrumentation, times(1)).logDebug("key: {}, value: {}", "test-key", "test-value"); } } diff --git a/src/test/java/io/odpf/firehose/sink/redis/parsers/RedisHashSetParserTest.java b/src/test/java/io/odpf/firehose/sink/redis/parsers/RedisHashSetParserTest.java index a566232a5..407425813 100644 --- a/src/test/java/io/odpf/firehose/sink/redis/parsers/RedisHashSetParserTest.java +++ b/src/test/java/io/odpf/firehose/sink/redis/parsers/RedisHashSetParserTest.java @@ -2,6 +2,7 @@ +import io.odpf.depot.metrics.StatsDReporter; import io.odpf.firehose.config.RedisSinkConfig; import io.odpf.firehose.config.enums.RedisSinkDataType; import io.odpf.firehose.message.Message; @@ -9,7 +10,6 @@ import io.odpf.firehose.consumer.TestMessage; import io.odpf.firehose.consumer.TestBookingLogMessage; import io.odpf.firehose.consumer.TestNestedRepeatedMessage; -import io.odpf.firehose.metrics.StatsDReporter; import io.odpf.firehose.proto.ProtoToFieldMapper; import io.odpf.firehose.sink.redis.dataentry.RedisHashSetFieldEntry; import io.odpf.stencil.client.ClassLoadStencilClient; @@ -20,7 +20,7 @@ import org.junit.rules.ExpectedException; import org.junit.runner.RunWith; import org.mockito.Mock; -import org.mockito.runners.MockitoJUnitRunner; +import org.mockito.junit.MockitoJUnitRunner; import java.util.IllegalFormatConversionException; import java.util.Properties; diff --git a/src/test/java/io/odpf/firehose/sink/redis/parsers/RedisKeyValueParserTest.java b/src/test/java/io/odpf/firehose/sink/redis/parsers/RedisKeyValueParserTest.java index ce5ecea11..3a414b2d2 100644 --- a/src/test/java/io/odpf/firehose/sink/redis/parsers/RedisKeyValueParserTest.java +++ b/src/test/java/io/odpf/firehose/sink/redis/parsers/RedisKeyValueParserTest.java @@ -1,16 +1,17 @@ package io.odpf.firehose.sink.redis.parsers; +import io.odpf.depot.metrics.StatsDReporter; import io.odpf.firehose.config.RedisSinkConfig; import io.odpf.firehose.consumer.TestKey; import io.odpf.firehose.consumer.TestMessage; import io.odpf.firehose.message.Message; -import io.odpf.firehose.metrics.StatsDReporter; import io.odpf.firehose.sink.redis.dataentry.RedisDataEntry; import 
io.odpf.firehose.sink.redis.dataentry.RedisKeyValueEntry; import io.odpf.stencil.Parser; import io.odpf.stencil.client.ClassLoadStencilClient; import io.odpf.stencil.client.StencilClient; import org.junit.Test; +import org.junit.jupiter.api.Assertions; import java.util.HashMap; import java.util.List; @@ -20,7 +21,6 @@ import static java.util.Collections.singletonMap; import static org.aeonbits.owner.ConfigFactory.create; import static org.junit.Assert.assertEquals; -import static org.junit.Assert.assertThrows; public class RedisKeyValueParserTest { @@ -77,7 +77,7 @@ public void shouldThrowExceptionWhenKeyTemplateIsEmpty() { RedisSinkConfig redisSinkConfig = create(RedisSinkConfig.class, singletonMap("SINK_REDIS_KEY_TEMPLATE", "")); RedisKeyValueParser redisKeyValueParser = new RedisKeyValueParser(testKeyProtoParser, redisSinkConfig, null); IllegalArgumentException illegalArgumentException = - assertThrows(IllegalArgumentException.class, () -> redisKeyValueParser.parse(message)); + Assertions.assertThrows(IllegalArgumentException.class, () -> redisKeyValueParser.parse(message)); assertEquals("Template '' is invalid", illegalArgumentException.getMessage()); } @@ -90,7 +90,7 @@ public void shouldThrowExceptionForNoListProtoIndex() { Message message = new Message(testKeyByteArr, testKeyByteArr, "", 0, 0); RedisKeyValueParser redisKeyValueParser = new RedisKeyValueParser(testKeyProtoParser, redisSinkConfig, null); - IllegalArgumentException illegalArgumentException = assertThrows(IllegalArgumentException.class, + IllegalArgumentException illegalArgumentException = Assertions.assertThrows(IllegalArgumentException.class, () -> redisKeyValueParser.parse(message)); assertEquals("Please provide SINK_REDIS_KEY_VALUE_DATA_PROTO_INDEX in key value sink", illegalArgumentException.getMessage()); } diff --git a/src/test/java/io/odpf/firehose/sink/redis/parsers/RedisListParserTest.java b/src/test/java/io/odpf/firehose/sink/redis/parsers/RedisListParserTest.java index f9f14059b..c2856efa2 100644 --- a/src/test/java/io/odpf/firehose/sink/redis/parsers/RedisListParserTest.java +++ b/src/test/java/io/odpf/firehose/sink/redis/parsers/RedisListParserTest.java @@ -1,14 +1,12 @@ package io.odpf.firehose.sink.redis.parsers; - - +import io.odpf.depot.metrics.StatsDReporter; import io.odpf.firehose.config.RedisSinkConfig; import io.odpf.firehose.config.enums.RedisSinkDataType; import io.odpf.firehose.message.Message; import io.odpf.firehose.consumer.TestBookingLogMessage; import io.odpf.firehose.consumer.TestKey; import io.odpf.firehose.consumer.TestMessage; -import io.odpf.firehose.metrics.StatsDReporter; import io.odpf.firehose.sink.redis.dataentry.RedisListEntry; import io.odpf.stencil.client.ClassLoadStencilClient; import io.odpf.stencil.Parser; @@ -18,7 +16,7 @@ import org.junit.rules.ExpectedException; import org.junit.runner.RunWith; import org.mockito.Mock; -import org.mockito.runners.MockitoJUnitRunner; +import org.mockito.junit.MockitoJUnitRunner; import static junit.framework.TestCase.assertEquals; import static org.mockito.Mockito.when; diff --git a/src/test/java/io/odpf/firehose/sink/redis/parsers/RedisParserFactoryTest.java b/src/test/java/io/odpf/firehose/sink/redis/parsers/RedisParserFactoryTest.java index e7a1fcda3..0fc00930b 100644 --- a/src/test/java/io/odpf/firehose/sink/redis/parsers/RedisParserFactoryTest.java +++ b/src/test/java/io/odpf/firehose/sink/redis/parsers/RedisParserFactoryTest.java @@ -1,10 +1,10 @@ package io.odpf.firehose.sink.redis.parsers; +import 
io.odpf.depot.metrics.StatsDReporter; import io.odpf.firehose.config.RedisSinkConfig; import io.odpf.firehose.config.enums.RedisSinkDataType; import io.odpf.firehose.consumer.TestMessage; -import io.odpf.firehose.metrics.StatsDReporter; import io.odpf.firehose.proto.ProtoToFieldMapper; import io.odpf.stencil.Parser; import io.odpf.stencil.client.ClassLoadStencilClient; @@ -14,7 +14,7 @@ import org.junit.rules.ExpectedException; import org.junit.runner.RunWith; import org.mockito.Mock; -import org.mockito.runners.MockitoJUnitRunner; +import org.mockito.junit.MockitoJUnitRunner; import java.util.Properties; diff --git a/src/test/java/io/odpf/firehose/sink/redis/ttl/DurationTTLTest.java b/src/test/java/io/odpf/firehose/sink/redis/ttl/DurationTTLTest.java index 47a174cc1..2ff62c958 100644 --- a/src/test/java/io/odpf/firehose/sink/redis/ttl/DurationTTLTest.java +++ b/src/test/java/io/odpf/firehose/sink/redis/ttl/DurationTTLTest.java @@ -4,7 +4,7 @@ import org.junit.Test; import org.junit.runner.RunWith; import org.mockito.Mock; -import org.mockito.runners.MockitoJUnitRunner; +import org.mockito.junit.MockitoJUnitRunner; import redis.clients.jedis.JedisCluster; import redis.clients.jedis.Pipeline; diff --git a/src/test/java/io/odpf/firehose/sinkdecorator/ExponentialBackOffProviderTest.java b/src/test/java/io/odpf/firehose/sinkdecorator/ExponentialBackOffProviderTest.java index 79c70c4fe..827e7b0c1 100644 --- a/src/test/java/io/odpf/firehose/sinkdecorator/ExponentialBackOffProviderTest.java +++ b/src/test/java/io/odpf/firehose/sinkdecorator/ExponentialBackOffProviderTest.java @@ -1,12 +1,12 @@ package io.odpf.firehose.sinkdecorator; -import io.odpf.firehose.metrics.Instrumentation; +import io.odpf.firehose.metrics.FirehoseInstrumentation; import org.junit.Before; import org.junit.Test; import org.junit.runner.RunWith; import org.mockito.Mock; -import org.mockito.runners.MockitoJUnitRunner; +import org.mockito.junit.MockitoJUnitRunner; import static java.lang.Math.toIntExact; import static org.mockito.Mockito.times; @@ -16,7 +16,7 @@ public class ExponentialBackOffProviderTest { @Mock - private Instrumentation instrumentation; + private FirehoseInstrumentation firehoseInstrumentation; @Mock private BackOff backOff; @@ -32,7 +32,7 @@ public class ExponentialBackOffProviderTest { @Before public void setup() { exponentialBackOffProvider = new ExponentialBackOffProvider(initialExpiryTimeInMS, backOffRate, - maximumBackoffTimeInMS, instrumentation, backOff); + maximumBackoffTimeInMS, firehoseInstrumentation, backOff); } @Test @@ -40,8 +40,8 @@ public void shouldBeWithinMaxBackoffTime() { exponentialBackOffProvider.backOff(100000000); verify(backOff).inMilliSeconds(maximumBackoffTimeInMS); - verify(instrumentation, times(1)).logWarn("backing off for {} milliseconds ", (long) maximumBackoffTimeInMS); - verify(instrumentation, times(1)).captureSleepTime("firehose_retry_backoff_sleep_milliseconds", toIntExact(maximumBackoffTimeInMS)); + verify(firehoseInstrumentation, times(1)).logWarn("backing off for {} milliseconds ", (long) maximumBackoffTimeInMS); + verify(firehoseInstrumentation, times(1)).captureSleepTime("firehose_retry_backoff_sleep_milliseconds", toIntExact(maximumBackoffTimeInMS)); } @Test @@ -50,15 +50,15 @@ public void shouldBackoffExponentially() { long sleepTime1 = 20; verify(backOff).inMilliSeconds(sleepTime1); - verify(instrumentation, times(1)).logWarn("backing off for {} milliseconds ", sleepTime1); - verify(instrumentation, 
times(1)).captureSleepTime("firehose_retry_backoff_sleep_milliseconds", toIntExact(sleepTime1)); + verify(firehoseInstrumentation, times(1)).logWarn("backing off for {} milliseconds ", sleepTime1); + verify(firehoseInstrumentation, times(1)).captureSleepTime("firehose_retry_backoff_sleep_milliseconds", toIntExact(sleepTime1)); exponentialBackOffProvider.backOff(4); long sleepTime2 = 160; verify(backOff).inMilliSeconds(sleepTime2); - verify(instrumentation, times(1)).logWarn("backing off for {} milliseconds ", sleepTime2); - verify(instrumentation, times(1)).captureSleepTime("firehose_retry_backoff_sleep_milliseconds", toIntExact(sleepTime2)); + verify(firehoseInstrumentation, times(1)).logWarn("backing off for {} milliseconds ", sleepTime2); + verify(firehoseInstrumentation, times(1)).captureSleepTime("firehose_retry_backoff_sleep_milliseconds", toIntExact(sleepTime2)); } @Test @@ -72,7 +72,7 @@ public void shouldSleepForBackOffTimeOnFirstRetry() { public void shouldRecordStatsForBackOffTime() { exponentialBackOffProvider.backOff(0); - verify(instrumentation, times(1)).logWarn("backing off for {} milliseconds ", (long) initialExpiryTimeInMS); - verify(instrumentation).captureSleepTime("firehose_retry_backoff_sleep_milliseconds", initialExpiryTimeInMS); + verify(firehoseInstrumentation, times(1)).logWarn("backing off for {} milliseconds ", (long) initialExpiryTimeInMS); + verify(firehoseInstrumentation).captureSleepTime("firehose_retry_backoff_sleep_milliseconds", initialExpiryTimeInMS); } } diff --git a/src/test/java/io/odpf/firehose/sinkdecorator/SinkFinalTest.java b/src/test/java/io/odpf/firehose/sinkdecorator/SinkFinalTest.java index 677b5c9f7..bf1fcf152 100644 --- a/src/test/java/io/odpf/firehose/sinkdecorator/SinkFinalTest.java +++ b/src/test/java/io/odpf/firehose/sinkdecorator/SinkFinalTest.java @@ -1,7 +1,7 @@ package io.odpf.firehose.sinkdecorator; import io.odpf.firehose.message.Message; -import io.odpf.firehose.metrics.Instrumentation; +import io.odpf.firehose.metrics.FirehoseInstrumentation; import io.odpf.firehose.metrics.Metrics; import io.odpf.firehose.sink.Sink; import org.junit.Test; @@ -16,8 +16,8 @@ public class SinkFinalTest { @Test public void shouldIgnoreMessages() throws IOException { Sink sink = Mockito.mock(Sink.class); - Instrumentation instrumentation = Mockito.mock(Instrumentation.class); - SinkFinal sinkFinal = new SinkFinal(sink, instrumentation); + FirehoseInstrumentation firehoseInstrumentation = Mockito.mock(FirehoseInstrumentation.class); + SinkFinal sinkFinal = new SinkFinal(sink, firehoseInstrumentation); List messages = new ArrayList() {{ add(new Message("".getBytes(), "".getBytes(), "", 0, 0)); add(new Message("".getBytes(), "".getBytes(), "", 0, 0)); @@ -25,7 +25,7 @@ public void shouldIgnoreMessages() throws IOException { Mockito.when(sink.pushMessage(messages)).thenReturn(messages); sinkFinal.pushMessage(messages); - Mockito.verify(instrumentation, Mockito.times(1)).logInfo("Ignoring messages {}", 2); - Mockito.verify(instrumentation, Mockito.times(1)).captureGlobalMessageMetrics(Metrics.MessageScope.IGNORED, 2); + Mockito.verify(firehoseInstrumentation, Mockito.times(1)).logInfo("Ignoring messages {}", 2); + Mockito.verify(firehoseInstrumentation, Mockito.times(1)).captureGlobalMessageMetrics(Metrics.MessageScope.IGNORED, 2); } } diff --git a/src/test/java/io/odpf/firehose/sinkdecorator/SinkWithDlqTest.java b/src/test/java/io/odpf/firehose/sinkdecorator/SinkWithDlqTest.java index 7f8db5328..25acc9c17 100644 --- 
a/src/test/java/io/odpf/firehose/sinkdecorator/SinkWithDlqTest.java +++ b/src/test/java/io/odpf/firehose/sinkdecorator/SinkWithDlqTest.java @@ -1,16 +1,15 @@ package io.odpf.firehose.sinkdecorator; +import io.odpf.depot.error.ErrorInfo; +import io.odpf.depot.error.ErrorType; import io.odpf.firehose.config.DlqConfig; import io.odpf.firehose.config.ErrorConfig; -import io.odpf.firehose.error.ErrorInfo; -import io.odpf.firehose.error.ErrorType; import io.odpf.firehose.message.Message; import io.odpf.firehose.error.ErrorHandler; -import io.odpf.firehose.metrics.Instrumentation; +import io.odpf.firehose.metrics.FirehoseInstrumentation; import io.odpf.firehose.metrics.Metrics; import io.odpf.firehose.sink.dlq.DlqWriter; import org.aeonbits.owner.ConfigFactory; -import org.hamcrest.Matchers; import org.junit.Before; import org.junit.Test; import org.mockito.ArgumentCaptor; @@ -26,8 +25,6 @@ import static io.odpf.firehose.metrics.Metrics.DLQ_MESSAGES_TOTAL; import static io.odpf.firehose.metrics.Metrics.DLQ_RETRY_ATTEMPTS_TOTAL; import static org.junit.Assert.assertEquals; -import static org.junit.Assert.assertThat; -import static org.mockito.ArgumentMatchers.anyList; import static org.mockito.Mockito.*; import static org.mockito.MockitoAnnotations.initMocks; @@ -43,7 +40,7 @@ public class SinkWithDlqTest { private Message message; @Mock - private Instrumentation instrumentation; + private FirehoseInstrumentation firehoseInstrumentation; @Mock private DlqWriter dlqWriter; @@ -72,15 +69,15 @@ public void shouldWriteToDLQWriter() throws Exception { when(message.getErrorInfo()).thenReturn(new ErrorInfo(new RuntimeException(), ErrorType.DESERIALIZATION_ERROR)); when(sinkWithRetry.pushMessage(anyList())).thenReturn(messages); - SinkWithDlq sinkWithDlq = new SinkWithDlq(sinkWithRetry, dlqWriter, backOffProvider, dlqConfig, errorHandler, instrumentation); + SinkWithDlq sinkWithDlq = new SinkWithDlq(sinkWithRetry, dlqWriter, backOffProvider, dlqConfig, errorHandler, firehoseInstrumentation); List pushResult = sinkWithDlq.pushMessage(messages); verify(dlqWriter, times(1)).write(messages); assertEquals(0, pushResult.size()); - verify(instrumentation, times(2)).captureMessageMetrics(DLQ_MESSAGES_TOTAL, Metrics.MessageType.TOTAL, ErrorType.DESERIALIZATION_ERROR, 1); - verify(instrumentation, times(1)).captureMessageMetrics(DLQ_MESSAGES_TOTAL, Metrics.MessageType.SUCCESS, 2); - verify(instrumentation, times(1)).incrementCounter(DLQ_RETRY_ATTEMPTS_TOTAL); - verify(instrumentation, times(1)).captureGlobalMessageMetrics(Metrics.MessageScope.DLQ, 2); + verify(firehoseInstrumentation, times(2)).captureMessageMetrics(DLQ_MESSAGES_TOTAL, Metrics.MessageType.TOTAL, ErrorType.DESERIALIZATION_ERROR, 1); + verify(firehoseInstrumentation, times(1)).captureMessageMetrics(DLQ_MESSAGES_TOTAL, Metrics.MessageType.SUCCESS, 2); + verify(firehoseInstrumentation, times(1)).incrementCounter(DLQ_RETRY_ATTEMPTS_TOTAL); + verify(firehoseInstrumentation, times(1)).captureGlobalMessageMetrics(Metrics.MessageScope.DLQ, 2); } @Test @@ -88,7 +85,7 @@ public void shouldNotWriteToDLQWhenDlqMessagesIsEmpty() throws IOException { ArrayList messages = new ArrayList<>(); when(sinkWithRetry.pushMessage(anyList())).thenReturn(messages); - SinkWithDlq sinkWithDlq = new SinkWithDlq(sinkWithRetry, dlqWriter, backOffProvider, dlqConfig, errorHandler, instrumentation); + SinkWithDlq sinkWithDlq = new SinkWithDlq(sinkWithRetry, dlqWriter, backOffProvider, dlqConfig, errorHandler, firehoseInstrumentation); sinkWithDlq.pushMessage(messages); 
verify(dlqWriter, never()).write(messages); @@ -103,7 +100,7 @@ public void shouldThrowIOExceptionWhenWriterThrowIOException() throws IOExceptio when(message.getErrorInfo()).thenReturn(new ErrorInfo(new RuntimeException(), ErrorType.DESERIALIZATION_ERROR)); when(sinkWithRetry.pushMessage(anyList())).thenReturn(messages); - SinkWithDlq sinkWithDlq = new SinkWithDlq(sinkWithRetry, dlqWriter, backOffProvider, dlqConfig, errorHandler, instrumentation); + SinkWithDlq sinkWithDlq = new SinkWithDlq(sinkWithRetry, dlqWriter, backOffProvider, dlqConfig, errorHandler, firehoseInstrumentation); sinkWithDlq.pushMessage(messages); } @@ -122,18 +119,18 @@ public void shouldRetryWriteMessagesToDlqUntilRetryMessagesEmpty() throws IOExce when(dlqWriter.write(messages)).thenReturn(dlqRetryMessages); when(dlqWriter.write(dlqRetryMessages)).thenReturn(new ArrayList<>()); - SinkWithDlq sinkWithDlq = new SinkWithDlq(sinkWithRetry, dlqWriter, backOffProvider, dlqConfig, errorHandler, instrumentation); + SinkWithDlq sinkWithDlq = new SinkWithDlq(sinkWithRetry, dlqWriter, backOffProvider, dlqConfig, errorHandler, firehoseInstrumentation); sinkWithDlq.pushMessage(messages); verify(dlqWriter, times(1)).write(messages); verify(dlqWriter, times(1)).write(dlqRetryMessages); - verify(instrumentation, times(1)).captureDLQErrors(any(), any()); + verify(firehoseInstrumentation, times(1)).captureDLQErrors(any(), any()); - verify(instrumentation, times(2)).captureMessageMetrics(DLQ_MESSAGES_TOTAL, Metrics.MessageType.TOTAL, ErrorType.DESERIALIZATION_ERROR, 1); - verify(instrumentation, times(1)).captureMessageMetrics(DLQ_MESSAGES_TOTAL, Metrics.MessageType.SUCCESS, 2); - verify(instrumentation, times(2)).incrementCounter(DLQ_RETRY_ATTEMPTS_TOTAL); - verify(instrumentation, times(1)).captureGlobalMessageMetrics(Metrics.MessageScope.DLQ, 2); + verify(firehoseInstrumentation, times(2)).captureMessageMetrics(DLQ_MESSAGES_TOTAL, Metrics.MessageType.TOTAL, ErrorType.DESERIALIZATION_ERROR, 1); + verify(firehoseInstrumentation, times(1)).captureMessageMetrics(DLQ_MESSAGES_TOTAL, Metrics.MessageType.SUCCESS, 2); + verify(firehoseInstrumentation, times(2)).incrementCounter(DLQ_RETRY_ATTEMPTS_TOTAL); + verify(firehoseInstrumentation, times(1)).captureGlobalMessageMetrics(Metrics.MessageScope.DLQ, 2); } @Test(expected = IOException.class) @@ -156,7 +153,7 @@ public void shouldThrowIOExceptionWhenExceedMaxRetryAttemptsButButHasFailedToBeD when(dlqWriter.write(dlqRetryMessages)).thenReturn(dlqRetryMessages); when(dlqWriter.write(dlqRetryMessages)).thenReturn(dlqRetryMessages); - SinkWithDlq sinkWithDlq = new SinkWithDlq(sinkWithRetry, dlqWriter, backOffProvider, dlqConfig, errorHandler, instrumentation); + SinkWithDlq sinkWithDlq = new SinkWithDlq(sinkWithRetry, dlqWriter, backOffProvider, dlqConfig, errorHandler, firehoseInstrumentation); sinkWithDlq.pushMessage(messages); } @@ -178,7 +175,7 @@ public void shouldNotThrowIOExceptionWhenFailOnMaxRetryAttemptDisabled() throws when(dlqWriter.write(dlqRetryMessages)).thenReturn(dlqRetryMessages); when(dlqWriter.write(dlqRetryMessages)).thenReturn(dlqRetryMessages); - SinkWithDlq sinkWithDlq = new SinkWithDlq(sinkWithRetry, dlqWriter, backOffProvider, dlqConfig, errorHandler, instrumentation); + SinkWithDlq sinkWithDlq = new SinkWithDlq(sinkWithRetry, dlqWriter, backOffProvider, dlqConfig, errorHandler, firehoseInstrumentation); sinkWithDlq.pushMessage(messages); } @@ -202,7 +199,7 @@ public void shouldCommitOffsetsOfDlqMessagesWhenSinkManageOffset() throws IOExce 
when(sinkWithRetry.pushMessage(messages)).thenReturn(dlqProcessedMessages); when(dlqWriter.write(anyList())).thenReturn(new LinkedList<>()); - SinkWithDlq sinkWithDlq = new SinkWithDlq(sinkWithRetry, dlqWriter, backOffProvider, dlqConfig, errorHandler, instrumentation); + SinkWithDlq sinkWithDlq = new SinkWithDlq(sinkWithRetry, dlqWriter, backOffProvider, dlqConfig, errorHandler, firehoseInstrumentation); List pushResult = sinkWithDlq.pushMessage(messages); verify(sinkWithRetry, times(1)).addOffsetsAndSetCommittable(dlqProcessedMessages); @@ -215,7 +212,7 @@ public void shouldNotRegisterAndCommitOffsetWhenNoMessagesIsProcessedByDLQ() thr ArrayList messages = new ArrayList<>(); when(sinkWithRetry.pushMessage(anyList())).thenReturn(messages); - SinkWithDlq sinkWithDlq = new SinkWithDlq(sinkWithRetry, dlqWriter, backOffProvider, dlqConfig, errorHandler, instrumentation); + SinkWithDlq sinkWithDlq = new SinkWithDlq(sinkWithRetry, dlqWriter, backOffProvider, dlqConfig, errorHandler, firehoseInstrumentation); sinkWithDlq.pushMessage(messages); verify(sinkWithRetry, never()).addOffsetsAndSetCommittable(anyList()); @@ -229,13 +226,13 @@ public void shouldWriteDlqMessagesWhenErrorTypesConfigured() throws IOException messages.add(messageWithError); messages.add(new Message(message, new ErrorInfo(null, ErrorType.SINK_UNKNOWN_ERROR))); when(sinkWithRetry.pushMessage(anyList())).thenReturn(messages); - SinkWithDlq sinkWithDlq = new SinkWithDlq(sinkWithRetry, dlqWriter, backOffProvider, dlqConfig, errorHandler, instrumentation); + SinkWithDlq sinkWithDlq = new SinkWithDlq(sinkWithRetry, dlqWriter, backOffProvider, dlqConfig, errorHandler, firehoseInstrumentation); List pushResult = sinkWithDlq.pushMessage(messages); - ArgumentCaptor> argumentCaptor = ArgumentCaptor.forClass(List.class); + ArgumentCaptor argumentCaptor = ArgumentCaptor.forClass(List.class); verify(dlqWriter, times(1)).write(argumentCaptor.capture()); assertEquals(1, argumentCaptor.getValue().size()); - assertThat(argumentCaptor.getValue(), Matchers.contains(messageWithError)); + assertEquals(messageWithError, argumentCaptor.getValue().get(0)); assertEquals(1, pushResult.size()); } @@ -249,15 +246,15 @@ public void shouldInstrumentFailure() throws Exception { when(sinkWithRetry.pushMessage(anyList())).thenReturn(messages); when(dlqWriter.write(anyList())).thenReturn(messages); - SinkWithDlq sinkWithDlq = new SinkWithDlq(sinkWithRetry, dlqWriter, backOffProvider, dlqConfig, errorHandler, instrumentation); + SinkWithDlq sinkWithDlq = new SinkWithDlq(sinkWithRetry, dlqWriter, backOffProvider, dlqConfig, errorHandler, firehoseInstrumentation); List pushResult = sinkWithDlq.pushMessage(messages); verify(dlqWriter, times(10)).write(messages); assertEquals(2, pushResult.size()); - verify(instrumentation, times(2)).captureMessageMetrics(DLQ_MESSAGES_TOTAL, Metrics.MessageType.TOTAL, ErrorType.DESERIALIZATION_ERROR, 1); - verify(instrumentation, times(1)).captureMessageMetrics(DLQ_MESSAGES_TOTAL, Metrics.MessageType.SUCCESS, 0); - verify(instrumentation, times(2)).captureMessageMetrics(DLQ_MESSAGES_TOTAL, Metrics.MessageType.FAILURE, ErrorType.DESERIALIZATION_ERROR, 1); - verify(instrumentation, times(10)).incrementCounter(DLQ_RETRY_ATTEMPTS_TOTAL); - verify(instrumentation, times(1)).captureGlobalMessageMetrics(Metrics.MessageScope.DLQ, 0); + verify(firehoseInstrumentation, times(2)).captureMessageMetrics(DLQ_MESSAGES_TOTAL, Metrics.MessageType.TOTAL, ErrorType.DESERIALIZATION_ERROR, 1); + verify(firehoseInstrumentation, 
times(1)).captureMessageMetrics(DLQ_MESSAGES_TOTAL, Metrics.MessageType.SUCCESS, 0); + verify(firehoseInstrumentation, times(2)).captureMessageMetrics(DLQ_MESSAGES_TOTAL, Metrics.MessageType.FAILURE, ErrorType.DESERIALIZATION_ERROR, 1); + verify(firehoseInstrumentation, times(10)).incrementCounter(DLQ_RETRY_ATTEMPTS_TOTAL); + verify(firehoseInstrumentation, times(1)).captureGlobalMessageMetrics(Metrics.MessageScope.DLQ, 0); } } diff --git a/src/test/java/io/odpf/firehose/sinkdecorator/SinkWithFailHandlerTest.java b/src/test/java/io/odpf/firehose/sinkdecorator/SinkWithFailHandlerTest.java index 12070e2b4..6d1198c1b 100644 --- a/src/test/java/io/odpf/firehose/sinkdecorator/SinkWithFailHandlerTest.java +++ b/src/test/java/io/odpf/firehose/sinkdecorator/SinkWithFailHandlerTest.java @@ -1,8 +1,8 @@ package io.odpf.firehose.sinkdecorator; +import io.odpf.depot.error.ErrorInfo; +import io.odpf.depot.error.ErrorType; import io.odpf.firehose.config.ErrorConfig; -import io.odpf.firehose.error.ErrorInfo; -import io.odpf.firehose.error.ErrorType; import io.odpf.firehose.message.Message; import io.odpf.firehose.error.ErrorHandler; import io.odpf.firehose.exception.SinkException; @@ -12,7 +12,7 @@ import org.junit.Test; import org.junit.runner.RunWith; import org.mockito.Mock; -import org.mockito.runners.MockitoJUnitRunner; +import org.mockito.junit.MockitoJUnitRunner; import java.io.IOException; import java.util.HashMap; diff --git a/src/test/java/io/odpf/firehose/sinkdecorator/SinkWithRetryTest.java b/src/test/java/io/odpf/firehose/sinkdecorator/SinkWithRetryTest.java index 99ce8b6ac..2da50b544 100644 --- a/src/test/java/io/odpf/firehose/sinkdecorator/SinkWithRetryTest.java +++ b/src/test/java/io/odpf/firehose/sinkdecorator/SinkWithRetryTest.java @@ -1,17 +1,16 @@ package io.odpf.firehose.sinkdecorator; +import io.odpf.depot.error.ErrorInfo; +import io.odpf.depot.error.ErrorType; import io.odpf.firehose.config.AppConfig; import io.odpf.firehose.config.ErrorConfig; import io.odpf.firehose.message.Message; import io.odpf.firehose.error.ErrorHandler; -import io.odpf.firehose.error.ErrorInfo; -import io.odpf.firehose.error.ErrorType; import io.odpf.firehose.exception.DeserializerException; -import io.odpf.firehose.metrics.Instrumentation; +import io.odpf.firehose.metrics.FirehoseInstrumentation; import io.odpf.firehose.metrics.Metrics; -import io.odpf.firehose.sink.log.KeyOrMessageParser; +import io.odpf.firehose.sink.common.KeyOrMessageParser; import org.aeonbits.owner.ConfigFactory; -import org.hamcrest.Matchers; import org.junit.Before; import org.junit.Test; import org.mockito.ArgumentCaptor; @@ -29,7 +28,6 @@ import static io.odpf.firehose.metrics.Metrics.RETRY_MESSAGES_TOTAL; import static io.odpf.firehose.metrics.Metrics.RETRY_ATTEMPTS_TOTAL; import static org.junit.Assert.*; -import static org.mockito.ArgumentMatchers.anyList; import static org.mockito.Mockito.*; import static org.mockito.MockitoAnnotations.initMocks; @@ -45,7 +43,7 @@ public class SinkWithRetryTest { private Message message; @Mock - private Instrumentation instrumentation; + private FirehoseInstrumentation firehoseInstrumentation; @Mock private KeyOrMessageParser parser; @@ -69,7 +67,7 @@ public void setUp() { @Test public void shouldReturnEmptyListIfSuperReturnsEmptyList() throws IOException, DeserializerException { when(sinkDecorator.pushMessage(anyList())).thenReturn(new ArrayList<>()); - SinkWithRetry sinkWithRetry = new SinkWithRetry(sinkDecorator, backOffProvider, instrumentation, appConfig, parser, errorHandler); + 
SinkWithRetry sinkWithRetry = new SinkWithRetry(sinkDecorator, backOffProvider, firehoseInstrumentation, appConfig, parser, errorHandler); List messages = sinkWithRetry.pushMessage( Collections.singletonList(new Message("key".getBytes(), "value".getBytes(), "topic", 1, 1))); @@ -85,7 +83,7 @@ public void shouldRetryForNumberOfAttemptsIfSuperReturnsEsbMessages() throws IOE when(message.getErrorInfo()).thenReturn(new ErrorInfo(null, ErrorType.DESERIALIZATION_ERROR)); when(sinkDecorator.pushMessage(anyList())).thenReturn(messages).thenReturn(messages).thenReturn(messages) .thenReturn(messages); - SinkWithRetry sinkWithRetry = new SinkWithRetry(sinkDecorator, backOffProvider, instrumentation, appConfig, parser, errorHandler); + SinkWithRetry sinkWithRetry = new SinkWithRetry(sinkDecorator, backOffProvider, firehoseInstrumentation, appConfig, parser, errorHandler); List messageList = sinkWithRetry.pushMessage(Collections.singletonList(message)); @@ -101,7 +99,7 @@ public void shouldRetryForNumberOfAttemptsAndSendEmptyMessageOnSuccess() throws when(message.getErrorInfo()).thenReturn(new ErrorInfo(null, ErrorType.DESERIALIZATION_ERROR)); when(sinkDecorator.pushMessage(anyList())).thenReturn(messages).thenReturn(messages) .thenReturn(new ArrayList<>()); - SinkWithRetry sinkWithRetry = new SinkWithRetry(sinkDecorator, backOffProvider, instrumentation, appConfig, parser, errorHandler); + SinkWithRetry sinkWithRetry = new SinkWithRetry(sinkDecorator, backOffProvider, firehoseInstrumentation, appConfig, parser, errorHandler); List messageList = sinkWithRetry.pushMessage(Collections.singletonList(message)); @@ -119,7 +117,7 @@ public void shouldRetryUntilSuccess() throws IOException, DeserializerException when(message.getErrorInfo()).thenReturn(new ErrorInfo(null, ErrorType.DESERIALIZATION_ERROR)); when(sinkDecorator.pushMessage(anyList())).thenReturn(messages).thenReturn(messages).thenReturn(messages) .thenReturn(messages).thenReturn(messages).thenReturn(new ArrayList<>()); - SinkWithRetry sinkWithRetry = new SinkWithRetry(sinkDecorator, backOffProvider, instrumentation, appConfig, parser, errorHandler); + SinkWithRetry sinkWithRetry = new SinkWithRetry(sinkDecorator, backOffProvider, firehoseInstrumentation, appConfig, parser, errorHandler); List messageList = sinkWithRetry.pushMessage(Collections.singletonList(message)); @@ -134,21 +132,21 @@ public void shouldLogRetriesMessages() throws IOException, DeserializerException messages.add(message); messages.add(message); when(message.getErrorInfo()).thenReturn(new ErrorInfo(null, ErrorType.DESERIALIZATION_ERROR)); - when(instrumentation.isDebugEnabled()).thenReturn(true); + when(firehoseInstrumentation.isDebugEnabled()).thenReturn(true); when(sinkDecorator.pushMessage(anyList())).thenReturn(messages).thenReturn(messages).thenReturn(messages) .thenReturn(messages).thenReturn(messages).thenReturn(new ArrayList<>()); - SinkWithRetry sinkWithRetry = new SinkWithRetry(sinkDecorator, backOffProvider, instrumentation, appConfig, parser, errorHandler); + SinkWithRetry sinkWithRetry = new SinkWithRetry(sinkDecorator, backOffProvider, firehoseInstrumentation, appConfig, parser, errorHandler); List messageList = sinkWithRetry.pushMessage(Collections.singletonList(message)); assertTrue(messageList.isEmpty()); - verify(instrumentation, times(1)).logInfo("Maximum retry attempts: {}", 10); - verify(instrumentation, times(5)).incrementCounter("firehose_retry_attempts_total"); - verify(instrumentation, times(1)).logInfo("Retrying messages attempt count: {}, Number of 
messages: {}", 1, 2); - verify(instrumentation, times(1)).logInfo("Retrying messages attempt count: {}, Number of messages: {}", 2, 2); - verify(instrumentation, times(1)).logInfo("Retrying messages attempt count: {}, Number of messages: {}", 3, 2); - verify(instrumentation, times(1)).logInfo("Retrying messages attempt count: {}, Number of messages: {}", 4, 2); - verify(instrumentation, times(1)).logInfo("Retrying messages attempt count: {}, Number of messages: {}", 5, 2); - verify(instrumentation, times(5)).logDebug("Retry failed messages: \n{}", "[null, null]"); + verify(firehoseInstrumentation, times(1)).logInfo("Maximum retry attempts: {}", 10); + verify(firehoseInstrumentation, times(5)).incrementCounter("firehose_retry_attempts_total"); + verify(firehoseInstrumentation, times(1)).logInfo("Retrying messages attempt count: {}, Number of messages: {}", 1, 2); + verify(firehoseInstrumentation, times(1)).logInfo("Retrying messages attempt count: {}, Number of messages: {}", 2, 2); + verify(firehoseInstrumentation, times(1)).logInfo("Retrying messages attempt count: {}, Number of messages: {}", 3, 2); + verify(firehoseInstrumentation, times(1)).logInfo("Retrying messages attempt count: {}, Number of messages: {}", 4, 2); + verify(firehoseInstrumentation, times(1)).logInfo("Retrying messages attempt count: {}, Number of messages: {}", 5, 2); + verify(firehoseInstrumentation, times(5)).logDebug("Retry failed messages: \n{}", "[null, null]"); } @Test @@ -159,14 +157,14 @@ public void shouldAddInstrumentationForRetry() throws Exception { messages.add(message); messages.add(message); when(message.getErrorInfo()).thenReturn(new ErrorInfo(null, ErrorType.DESERIALIZATION_ERROR)); - SinkWithRetry sinkWithRetry = new SinkWithRetry(sinkDecorator, backOffProvider, instrumentation, appConfig, parser, errorHandler); + SinkWithRetry sinkWithRetry = new SinkWithRetry(sinkDecorator, backOffProvider, firehoseInstrumentation, appConfig, parser, errorHandler); when(sinkDecorator.pushMessage(anyList())).thenReturn(messages).thenReturn(messages).thenReturn(new ArrayList<>()); List messageList = sinkWithRetry.pushMessage(Collections.singletonList(message)); assertTrue(messageList.isEmpty()); - verify(instrumentation, times(1)).logInfo("Maximum retry attempts: {}", 3); - verify(instrumentation, times(3)).captureMessageMetrics(RETRY_MESSAGES_TOTAL, Metrics.MessageType.TOTAL, ErrorType.DESERIALIZATION_ERROR, 1); - verify(instrumentation, times(2)).incrementCounter(RETRY_ATTEMPTS_TOTAL); - verify(instrumentation, times(1)).captureMessageMetrics(RETRY_MESSAGES_TOTAL, Metrics.MessageType.SUCCESS, 3); + verify(firehoseInstrumentation, times(1)).logInfo("Maximum retry attempts: {}", 3); + verify(firehoseInstrumentation, times(3)).captureMessageMetrics(RETRY_MESSAGES_TOTAL, Metrics.MessageType.TOTAL, ErrorType.DESERIALIZATION_ERROR, 1); + verify(firehoseInstrumentation, times(2)).incrementCounter(RETRY_ATTEMPTS_TOTAL); + verify(firehoseInstrumentation, times(1)).captureMessageMetrics(RETRY_MESSAGES_TOTAL, Metrics.MessageType.SUCCESS, 3); } @Test @@ -177,15 +175,15 @@ public void shouldAddInstrumentationForRetryFailures() throws Exception { messages.add(message); messages.add(message); when(message.getErrorInfo()).thenReturn(new ErrorInfo(null, ErrorType.DESERIALIZATION_ERROR)); - SinkWithRetry sinkWithRetry = new SinkWithRetry(sinkDecorator, backOffProvider, instrumentation, appConfig, parser, errorHandler); + SinkWithRetry sinkWithRetry = new SinkWithRetry(sinkDecorator, backOffProvider, firehoseInstrumentation, 
appConfig, parser, errorHandler); when(sinkDecorator.pushMessage(anyList())).thenReturn(messages).thenReturn(messages).thenReturn(new ArrayList<>()); List messageList = sinkWithRetry.pushMessage(Collections.singletonList(message)); assertFalse(messageList.isEmpty()); - verify(instrumentation, times(1)).logInfo("Maximum retry attempts: {}", 1); - verify(instrumentation, times(3)).captureMessageMetrics(RETRY_MESSAGES_TOTAL, Metrics.MessageType.TOTAL, ErrorType.DESERIALIZATION_ERROR, 1); - verify(instrumentation, times(1)).incrementCounter(RETRY_ATTEMPTS_TOTAL); - verify(instrumentation, times(1)).captureMessageMetrics(RETRY_MESSAGES_TOTAL, Metrics.MessageType.SUCCESS, 0); - verify(instrumentation, times(3)).captureMessageMetrics(RETRY_MESSAGES_TOTAL, Metrics.MessageType.FAILURE, ErrorType.DESERIALIZATION_ERROR, 1); + verify(firehoseInstrumentation, times(1)).logInfo("Maximum retry attempts: {}", 1); + verify(firehoseInstrumentation, times(3)).captureMessageMetrics(RETRY_MESSAGES_TOTAL, Metrics.MessageType.TOTAL, ErrorType.DESERIALIZATION_ERROR, 1); + verify(firehoseInstrumentation, times(1)).incrementCounter(RETRY_ATTEMPTS_TOTAL); + verify(firehoseInstrumentation, times(1)).captureMessageMetrics(RETRY_MESSAGES_TOTAL, Metrics.MessageType.SUCCESS, 0); + verify(firehoseInstrumentation, times(3)).captureMessageMetrics(RETRY_MESSAGES_TOTAL, Metrics.MessageType.FAILURE, ErrorType.DESERIALIZATION_ERROR, 1); } @Test(expected = IOException.class) @@ -199,7 +197,7 @@ public void shouldThrowIOExceptionWhenExceedMaximumRetryAttempts() throws IOExce when(message.getErrorInfo()).thenReturn(new ErrorInfo(null, ErrorType.DESERIALIZATION_ERROR)); when(sinkDecorator.pushMessage(anyList())).thenReturn(messages).thenReturn(messages).thenReturn(messages) .thenReturn(messages).thenReturn(messages).thenReturn(new ArrayList<>()); - SinkWithRetry sinkWithRetry = new SinkWithRetry(sinkDecorator, backOffProvider, instrumentation, appConfig, parser, errorHandler); + SinkWithRetry sinkWithRetry = new SinkWithRetry(sinkDecorator, backOffProvider, firehoseInstrumentation, appConfig, parser, errorHandler); sinkWithRetry.pushMessage(Collections.singletonList(message)); } @@ -215,17 +213,17 @@ public void shouldRetryMessagesWhenErrorTypesConfigured() throws IOException { HashSet errorTypes = new HashSet<>(); errorTypes.add(ErrorType.DESERIALIZATION_ERROR); - SinkWithRetry sinkWithRetry = new SinkWithRetry(sinkDecorator, backOffProvider, instrumentation, appConfig, parser, errorHandler); + SinkWithRetry sinkWithRetry = new SinkWithRetry(sinkDecorator, backOffProvider, firehoseInstrumentation, appConfig, parser, errorHandler); List messageList = sinkWithRetry.pushMessage(messages); assertEquals(1, messageList.size()); - ArgumentCaptor> argumentCaptor = ArgumentCaptor.forClass(List.class); + ArgumentCaptor argumentCaptor = ArgumentCaptor.forClass(List.class); verify(sinkDecorator, times(2)).pushMessage(argumentCaptor.capture()); - List> args = argumentCaptor.getAllValues(); + List args = argumentCaptor.getAllValues(); assertEquals(2, args.get(0).size()); assertEquals(1, args.get(1).size()); - assertThat(args.get(1), Matchers.contains(messageWithError)); + assertEquals(messageWithError, args.get(1).get(0)); } } From 17e6443e7704e02ceee9866a7ee0e4d5e39b52d3 Mon Sep 17 00:00:00 2001 From: lavkesh Date: Tue, 24 May 2022 07:24:50 +0300 Subject: [PATCH 2/9] chore: version bump --- build.gradle | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/build.gradle b/build.gradle index 57a20c308..85fc857cd 100644 --- 
a/build.gradle +++ b/build.gradle @@ -101,7 +101,7 @@ dependencies { implementation 'com.google.cloud:google-cloud-storage:1.114.0' implementation 'com.google.cloud:google-cloud-bigquery:1.115.0' implementation 'org.apache.logging.log4j:log4j-core:2.17.1' - implementation group: 'io.odpf', name: 'depot', version: '0.1.3' + implementation group: 'io.odpf', name: 'depot', version: '0.1.4' implementation group: 'com.networknt', name: 'json-schema-validator', version: '1.0.59' exclude group: 'org.slf4j' testImplementation group: 'junit', name: 'junit', version: '4.11' From a6b76b68f0ea8170dbfc37e8820a1545718d6bef Mon Sep 17 00:00:00 2001 From: Lavkesh Lahngir Date: Tue, 31 May 2022 12:03:35 +0300 Subject: [PATCH 3/9] Documentation on depot repo (#170) * chore: version bump * docs: add docs for depot repo * docs: add bigquery metrics --- docs/docs/concepts/architecture.md | 1 + docs/docs/contribute/contribution.md | 2 +- docs/docs/reference/metrics.md | 18 +----------------- docs/reference/configuration/bigquery-sink.md | 6 ++++++ 4 files changed, 9 insertions(+), 18 deletions(-) create mode 100644 docs/reference/configuration/bigquery-sink.md diff --git a/docs/docs/concepts/architecture.md b/docs/docs/concepts/architecture.md index a4b9b7157..ead996e16 100644 --- a/docs/docs/concepts/architecture.md +++ b/docs/docs/concepts/architecture.md @@ -40,6 +40,7 @@ _**Sink**_ - All the existing sink types follow the same contract/lifecycle defined in `AbstractSink.java`. It consists of two stages: - Prepare: Transformation over-filtered messages’ list to prepare the sink-specific insert/update client requests. - Execute: Requests created in the Prepare stage are executed at this step and a list of failed messages is returned \(if any\) for retry. +- Underlying implementation of AbstractSink can use implementation present in [depot](https://github.com/odpf/depot). - If the batch has any failures, Firehose will retry to push the failed messages to the sink _**SinkPool**_ diff --git a/docs/docs/contribute/contribution.md b/docs/docs/contribute/contribution.md index 685de333a..5d33f3a52 100644 --- a/docs/docs/contribute/contribution.md +++ b/docs/docs/contribute/contribution.md @@ -30,7 +30,7 @@ To help you get your feet wet and get you familiar with our contribution process We use RFCs and GitHub issues to communicate ideas. - You can report a bug or suggest a feature enhancement or can just ask questions. Reach out on Github discussions for this purpose. -- You are also welcome to add a new sink, improve monitoring and logging and improve code quality. +- You are also welcome to add a new common sink in [depot](https://github.com/odpf/depot), improve monitoring and logging and improve code quality. - You can help with documenting new features or improve existing documentation. - You can also review and accept other contributions if you are a maintainer. diff --git a/docs/docs/reference/metrics.md b/docs/docs/reference/metrics.md index ccd615be1..4ca5c97b0 100644 --- a/docs/docs/reference/metrics.md +++ b/docs/docs/reference/metrics.md @@ -16,6 +16,7 @@ Service-level Indicators \(SLIs\) are the measurements used to calculate the per * [HTTP Sink](metrics.md#http-sink) * [Filter](metrics.md#filter) * [Blob Sink](metrics.md#blob-sink) +* [Bigquery Sink](https://github.com/odpf/depot/blob/main/docs/reference/metrics.md#bigquery-sink) ## Type Details @@ -368,20 +369,3 @@ Total Size of the uploaded file in bytes. 
### `File Upload Records Total` Total number records inside files that successfully being uploaded to blob storage. - -## Bigquery Sink - -### `Biquery Operation Total` - -Total number of bigquery API operation performed - -### `Bigquery Operation Latency` - -Time taken for bigquery API operation performed - -### `Bigquery Errors Total` - -Total numbers of error occurred on bigquery insert operation. - - - diff --git a/docs/reference/configuration/bigquery-sink.md b/docs/reference/configuration/bigquery-sink.md new file mode 100644 index 000000000..81de0041a --- /dev/null +++ b/docs/reference/configuration/bigquery-sink.md @@ -0,0 +1,6 @@ +# Bigquery Sink + +A Bigquery sink Firehose \(`SINK_TYPE`=`bigquery`\) requires env variables to be set along with Generic ones and +env variables in depot repository. The Firehose sink uses the bigquery implementation available in the [depot](https://github.com/odpf/depot) repository. + +[Configuration of Bigquery Sink](https://github.com/odpf/depot/blob/main/docs/reference/configuration/bigquery-sink.md) \ No newline at end of file From 6098b24346f4f8a428283101cbe819766d3fddca Mon Sep 17 00:00:00 2001 From: lavkesh Date: Wed, 22 Jun 2022 09:35:26 +0200 Subject: [PATCH 4/9] chore: version bump and rebase from main --- build.gradle | 2 +- docs/docs/sinks/bigquery-sink.md | 212 +----------------- docs/reference/configuration/bigquery-sink.md | 6 - 3 files changed, 4 insertions(+), 216 deletions(-) delete mode 100644 docs/reference/configuration/bigquery-sink.md diff --git a/build.gradle b/build.gradle index 85fc857cd..973b61952 100644 --- a/build.gradle +++ b/build.gradle @@ -101,7 +101,7 @@ dependencies { implementation 'com.google.cloud:google-cloud-storage:1.114.0' implementation 'com.google.cloud:google-cloud-bigquery:1.115.0' implementation 'org.apache.logging.log4j:log4j-core:2.17.1' - implementation group: 'io.odpf', name: 'depot', version: '0.1.4' + implementation group: 'io.odpf', name: 'depot', version: '0.1.5' implementation group: 'com.networknt', name: 'json-schema-validator', version: '1.0.59' exclude group: 'org.slf4j' testImplementation group: 'junit', name: 'junit', version: '4.11' diff --git a/docs/docs/sinks/bigquery-sink.md b/docs/docs/sinks/bigquery-sink.md index 9dd2d6d0c..1f8989dcf 100644 --- a/docs/docs/sinks/bigquery-sink.md +++ b/docs/docs/sinks/bigquery-sink.md @@ -1,212 +1,6 @@ # BigQuery -Bigquery Sink has several responsibilities, first creation of bigquery table and dataset when they are not exist, second update the bigquery table schema based on the latest protobuf schema, third translate protobuf messages into bigquery records and insert them to bigquery tables. -Bigquery utilise Bigquery [Streaming API](https://cloud.google.com/bigquery/streaming-data-into-bigquery) to insert record into bigquery tables. +A Bigquery sink Firehose \(`SINK_TYPE`=`bigquery`\) requires env variables to be set along with Generic ones and +env variables in depot repository. The Firehose sink uses the bigquery implementation available in the [depot](https://github.com/odpf/depot) repository. - -## Asynchronous consumer mode - -Bigquery Streaming API limits size of payload sent for each insert operations. The limitation reduces the amount of message allowed to be inserted when the message size is big. -This will reduce the throughput of bigquery sink. To increase the throughput, firehose provide kafka consumer asynchronous mode. -In asynchronous mode sink operation is executed asynchronously, so multiple sink task can be scheduled and run concurrently.
-Throughput can be increased by increasing the number of sink pool. - -## At Least Once Guarantee - -Because of asynchronous consumer mode and the possibility of retry on the insert operation. There is no guarantee of the message order that successfully sent to the sink. -That also happened with commit offset, the there is no order of the offset number of the processed messages. -Firehose collect all the offset sort them and only commit the latest continuous offset. -This will ensure all the offset being committed after messages successfully processed even when some messages are being re processed by retry handler or when the insert operation took a long time. - -## Bigquery table schema update - -Bigquery Sink update the bigquery table schema on separate table update operation. Bigquery utilise [Stencil](https://github.com/odpf/stencil) to parse protobuf messages generate schema and update bigquery tables with the latest schema. -The stencil client periodically reload the descriptor cache. Table schema update happened after the descriptor caches uploaded. -Because firehose is horizontally scalable multiple firehose consumer might be running. -Because there is no coordination strategy between consumers the schema update will be triggered by all consumers. - -## Protobuf and BigQuery table type mapping - -Here are type conversion between protobuf type and bigquery type : - -| Protobuf Type | Bigquery Type | -| ---------------------------------------------------------------------------------- | ------------------------ | -| bytes | BYTES | -| string | STRING | -| enum | STRING | -| float | FLOAT | -| double | FLOAT | -| bool | BOOLEAN | -| int64, uint64, int32, uint32, fixed64, fixed32, sfixed64, sfixed32, sint64, sint32 | INTEGER | -| message | RECORD | -| .google.protobuf.Timestamp | TIMESTAMP | -| .google.protobuf.Struct | STRING (Json Serialised) | -| .google.protobuf.Duration | RECORD | - -## Modifier - -| Protobuf Modifier | Bigquery Modifier | -| ----------------- | ----------------- | -| repeated | REPEATED | - -## Partitioning - -Bigquery Sink supports creation of table with partition configuration. Currently, Bigquery Sink only supports time based partitioning. -To have time based partitioning protobuf `Timestamp` as field is needed on the protobuf message. The protobuf field will be used as partitioning column on table creation. -The time partitioning type that is currently supported is `DAY` partitioning. - -## Kafka Metadata - -For data quality checking purpose sometimes kafka metadata need to be added on the record. When `SINK_BIGQUERY_METADATA_NAMESPACE` is configured kafka metadata column will be added, here is the list of kafka metadata column to be added : - -| Fully Qualified Column Name | Type | Modifier | -| --------------------------------- | --------- | -------- | -| metadata_column | RECORD | NULLABLE | -| metadata_column.message_partition | INTEGER | NULLABLE | -| metadata_column.message_offset | INTEGER | NULLABLE | -| metadata_column.message_topic | STRING | NULLABLE | -| metadata_column.message_timestamp | TIMESTAMP | NULLABLE | -| metadata_column.load_time | TIMESTAMP | NULLABLE | - -## Error handling - -Firehose consumer parse errors from table insertion, translate the error into generic error types and attach them for each message that failed to be inserted to bigquery. -Users can configure how to handle each generic error types accordingly. 
-Here is mapping of the error translation to generic firehose error types : - -| Error Name | Generic Error Type | Description | -| -------------------- | ------------------ | --------------------------------------------------------------------------------------------------------------------------------------- | -| Stopped Error | SINK_5XX_ERROR | Error on a row insertion that happened because insert job is cancelled because other record is invalid although current record is valid | -| Out of bounds Error | SINK_4XX_ERROR | Error on a row insertion the partitioned column has a date value less than 5 years and more than 1 year in the future | -| Invalid schema Error | SINK_4XX_ERROR | Error on a row insertion when there is a new field that is not exist on the table or when there is required field on the table | -| Other Error | SINK_UNKNOWN_ERROR | Uncategorized error | - -## Google Cloud Bigquery IAM Permission - -Several IAM permission is required for bigquery sink to run properly, - -- Create and update Dataset - - bigquery.tables.create - - bigquery.tables.get - - bigquery.tables.update -- Create and update Table - - bigquery.datasets.create - - bigquery.datasets.get - - bigquery.datasets.update -- Stream insert to Table - - bigquery.tables.updateData - -Further documentation on bigquery IAM permission [here](https://cloud.google.com/bigquery/streaming-data-into-bigquery). - -## Configurations - -A Bigquery sink Firehose \(`SINK_TYPE`=`bigquery`\) requires the following variables to be set along with Generic ones - -### `SINK_BIGQUERY_GOOGLE_CLOUD_PROJECT_ID` - -Contains information of google cloud project id location of the bigquery table where the records need to be inserted. Further documentation on google cloud [project id](https://cloud.google.com/resource-manager/docs/creating-managing-projects). - -- Example value: `gcp-project-id` -- Type: `required` - -### `SINK_BIGQUERY_TABLE_NAME` - -The name of bigquery table. Here is further documentation of bigquery [table naming](https://cloud.google.com/bigquery/docs/tables). - -- Example value: `user_profile` -- Type: `required` - -### `SINK_BIGQUERY_DATASET_NAME` - -The name of dataset that contains the bigquery table. Here is further documentation of bigquery [dataset naming](https://cloud.google.com/bigquery/docs/datasets). - -- Example value: `customer` -- Type: `required` - -### `SINK_BIGQUERY_DATASET_LABELS` - -Labels of a bigquery dataset, key-value information separated by comma attached to the bigquery dataset. This configuration define labels that will be set to the bigquery dataset. Here is further documentation of bigquery [labels](https://cloud.google.com/bigquery/docs/labels-intro). - -- Example value: `owner=data-engineering,granurality=daily` -- Type: `optional` - -### `SINK_BIGQUERY_TABLE_LABELS` - -Labels of a bigquery table, key-value information separated by comma attached to the bigquery table. This configuration define labels that will be set to the bigquery dataset. Here is further documentation of bigquery [labels](https://cloud.google.com/bigquery/docs/labels-intro). - -- Example value: `owner=data-engineering,granurality=daily` -- Type: `optional` - -### `SINK_BIGQUERY_TABLE_PARTITIONING_ENABLE` - -Configuration for enable table partitioning. This config will be used for provide partitioning config when creating the bigquery table. -Bigquery table partitioning config can only be set once, on the table creation and the partitioning cannot be disabled once created. 
Changing this value of this config later will cause error when firehose trying to update the bigquery table. -Here is further documentation of bigquery [table partitioning](https://cloud.google.com/bigquery/docs/partitioned-tables). - -- Example value: `true` -- Type: `required` -- Default value: `false` - -### `SINK_BIGQUERY_TABLE_PARTITION_KEY` - -Define protobuf/bigquery field name that will be used for bigquery table partitioning. only protobuf `Timestamp` field, that later converted into bigquery `Timestamp` column that is supported as partitioning key. -Currently, this sink only support `DAY` time partitioning type. -Here is further documentation of bigquery [column time partitioning](https://cloud.google.com/bigquery/docs/creating-partitioned-tables#console). - -- Example value: `event_timestamp` -- Type: `required` - -### `SINK_BIGQUERY_ROW_INSERT_ID_ENABLE` - -This config enables adding of ID row intended for deduplication when inserting new records into bigquery. -Here is further documentation of bigquery streaming insert [deduplication](https://cloud.google.com/bigquery/streaming-data-into-bigquery). - -- Example value: `false` -- Type: `required` -- Default value: `true` - -### `SINK_BIGQUERY_CREDENTIAL_PATH` - -Full path of google cloud credentials file. Here is further documentation of google cloud authentication and [credentials](https://cloud.google.com/docs/authentication/getting-started). - -- Example value: `/.secret/google-cloud-credentials.json` -- Type: `required` - -### `SINK_BIGQUERY_METADATA_NAMESPACE` - -The name of column that will be added alongside of the existing bigquery column that generated from protobuf, that column contains struct of kafka metadata of the inserted record. -When this config is not configured the metadata column will not be added to the table. - -- Example value: `kafka_metadata` -- Type: `optional` - -### `SINK_BIGQUERY_DATASET_LOCATION` - -The geographic region name of location of bigquery dataset. Further documentation on bigquery dataset [location](https://cloud.google.com/bigquery/docs/locations#dataset_location). - -- Example value: `us-central1` -- Type: `optional` -- Default value: `asia-southeast1` - -### `SINK_BIGQUERY_TABLE_PARTITION_EXPIRY_MS` - -The duration of bigquery table partitioning expiration in milliseconds. Fill this config with `-1` will disable the table partition expiration. Further documentation on bigquery table partition [expiration](https://cloud.google.com/bigquery/docs/managing-partitioned-tables#partition-expiration). - -- Example value: `2592000000` -- Type: `optional` -- Default value: `-1` - -### `SINK_BIGQUERY_CLIENT_READ_TIMEOUT_MS` - -The duration of bigquery client http read timeout in milliseconds, 0 for an infinite timeout, a negative number for the default value (20000). - -- Example value: `20000` -- Type: `optional` -- Default value: `-1` - -### `SINK_BIGQUERY_CLIENT_CONNECT_TIMEOUT_MS` - -The duration of bigquery client http connection timeout in milliseconds, 0 for an infinite timeout, a negative number for the default value (20000). 
- -- Example value: `20000` -- Type: `optional` -- Default value: `-1` +[Configuration of Bigquery Sink] (https://github.com/odpf/depot/blob/main/docs/reference/configuration/bigquery-sink.md) \ No newline at end of file diff --git a/docs/reference/configuration/bigquery-sink.md b/docs/reference/configuration/bigquery-sink.md deleted file mode 100644 index 81de0041a..000000000 --- a/docs/reference/configuration/bigquery-sink.md +++ /dev/null @@ -1,6 +0,0 @@ -# Bigquery Sink - -A Bigquery sink Firehose \(`SINK_TYPE`=`bigquery`\) requires env variables to be set along with Generic ones and -env variables in depot repository. The Firehose sink uses bigquery implementation available [depot](https://github.com/odpf/depot) repository. - -[Configuration of Bigquery Sink] (https://github.com/odpf/depot/blob/main/docs/reference/configuration/bigquery-sink.md) \ No newline at end of file From e0e0478affbbcab7a77bccdf4dcdd67770cdbd61 Mon Sep 17 00:00:00 2001 From: lavkesh Date: Mon, 27 Jun 2022 16:32:33 +0100 Subject: [PATCH 5/9] fix: fixed tests after merging --- .../odpf/firehose/consumer/ConsumerAndOffsetManagerTest.java | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/src/test/java/io/odpf/firehose/consumer/ConsumerAndOffsetManagerTest.java b/src/test/java/io/odpf/firehose/consumer/ConsumerAndOffsetManagerTest.java index 4b1b6c740..2092d0ac5 100644 --- a/src/test/java/io/odpf/firehose/consumer/ConsumerAndOffsetManagerTest.java +++ b/src/test/java/io/odpf/firehose/consumer/ConsumerAndOffsetManagerTest.java @@ -113,7 +113,7 @@ public void shouldCommitAfterDelay() throws InterruptedException { add(s3); }}; FirehoseKafkaConsumer consumer = Mockito.mock(FirehoseKafkaConsumer.class); - Instrumentation instrumentation = Mockito.mock(Instrumentation.class); + FirehoseInstrumentation instrumentation = Mockito.mock(FirehoseInstrumentation.class); OffsetManager offsetManager = new OffsetManager(); KafkaConsumerConfig config = ConfigFactory.create(KafkaConsumerConfig.class, new HashMap() {{ put("SOURCE_KAFKA_CONSUMER_CONFIG_MANUAL_COMMIT_MIN_INTERVAL_MS", "500"); @@ -138,7 +138,7 @@ public void shouldCommitWithoutDelay() { add(s3); }}; FirehoseKafkaConsumer consumer = Mockito.mock(FirehoseKafkaConsumer.class); - Instrumentation instrumentation = Mockito.mock(Instrumentation.class); + FirehoseInstrumentation instrumentation = Mockito.mock(FirehoseInstrumentation.class); OffsetManager offsetManager = new OffsetManager(); KafkaConsumerConfig config = ConfigFactory.create(KafkaConsumerConfig.class, new HashMap() {{ put("SOURCE_KAFKA_CONSUMER_CONFIG_MANUAL_COMMIT_MIN_INTERVAL_MS", "-1"); From abdb7da52c53ec553d6c6dfb7fb0b6d1110cb057 Mon Sep 17 00:00:00 2001 From: kevinbheda Date: Wed, 27 Jul 2022 16:20:18 +0530 Subject: [PATCH 6/9] feat: json support for bq sink (#173) * feat: json support for bq sink * feat: add/update depot configs in sinkFactoryUtils * chore: version bump and removing json related configs * feat: add json schema type * fix: fix typo and handle json messages on log dlq writer (#180) * fix: fix typo and handle json messages on log dlq writer * test: add logDlqwriter unit test * chore: version bump Co-authored-by: kevin.bheda Co-authored-by: mayur.gubrele <2310-mayur.gubrele@users.noreply.source.golabs.io> Co-authored-by: lavkesh Co-authored-by: jesrypandawa <78069094+jesrypandawa@users.noreply.github.com> --- build.gradle | 2 +- docs/docs/advance/generic.md | Bin 5534 -> 5671 bytes .../io/odpf/firehose/config/AppConfig.java | 19 ++- 
.../firehose/config/BigQuerySinkConfig.java | 61 ---------- .../converter/InputSchemaTypeConverter.java | 13 +++ .../config/enums/InputSchemaType.java | 6 + .../consumer/FirehoseConsumerFactory.java | 4 +- .../io/odpf/firehose/sink/SinkFactory.java | 7 +- .../odpf/firehose/sink/SinkFactoryUtils.java | 5 +- .../firehose/sink/dlq/log/LogDlqWriter.java | 4 +- .../firehose/sinkdecorator/SinkWithRetry.java | 25 +++- .../InputSchemaTypeConverterTest.java | 24 ++++ .../firehose/sink/SinkFactoryUtilsTest.java | 4 +- .../firehose/sink/dlq/LogDlqWriterTest.java | 108 ++++++++++++++++++ .../sinkdecorator/SinkWithRetryTest.java | 29 +++++ 15 files changed, 224 insertions(+), 87 deletions(-) delete mode 100644 src/main/java/io/odpf/firehose/config/BigQuerySinkConfig.java create mode 100644 src/main/java/io/odpf/firehose/config/converter/InputSchemaTypeConverter.java create mode 100644 src/main/java/io/odpf/firehose/config/enums/InputSchemaType.java create mode 100644 src/test/java/io/odpf/firehose/config/converter/InputSchemaTypeConverterTest.java create mode 100644 src/test/java/io/odpf/firehose/sink/dlq/LogDlqWriterTest.java diff --git a/build.gradle b/build.gradle index fc6eca82d..a1f88e226 100644 --- a/build.gradle +++ b/build.gradle @@ -101,7 +101,7 @@ dependencies { implementation 'com.google.cloud:google-cloud-storage:1.114.0' implementation 'com.google.cloud:google-cloud-bigquery:1.115.0' implementation 'org.apache.logging.log4j:log4j-core:2.17.1' - implementation group: 'io.odpf', name: 'depot', version: '0.1.5' + implementation group: 'io.odpf', name: 'depot', version: '0.1.6' implementation group: 'com.networknt', name: 'json-schema-validator', version: '1.0.59' exclude group: 'org.slf4j' testImplementation group: 'junit', name: 'junit', version: '4.11' diff --git a/docs/docs/advance/generic.md b/docs/docs/advance/generic.md index 93764c6dc4fbb04e3a200f67a06344e8538e5a06..308e339135ad48f89ebcd8c23367fd14fac5a4f5 100644 GIT binary patch delta 142 zcmbQIyFUe10E=^0==p8Hy0A$%PSO5S3 delta 10 RcmZ3kGf#U$(8k+7q5v5+1Z)5R diff --git a/src/main/java/io/odpf/firehose/config/AppConfig.java b/src/main/java/io/odpf/firehose/config/AppConfig.java index 5dd7ef43b..612dbb2a1 100644 --- a/src/main/java/io/odpf/firehose/config/AppConfig.java +++ b/src/main/java/io/odpf/firehose/config/AppConfig.java @@ -1,9 +1,11 @@ package io.odpf.firehose.config; +import io.odpf.firehose.config.converter.InputSchemaTypeConverter; import io.odpf.firehose.config.converter.ProtoIndexToFieldMapConverter; import io.odpf.firehose.config.converter.SchemaRegistryHeadersConverter; import io.odpf.firehose.config.converter.SchemaRegistryRefreshConverter; import io.odpf.firehose.config.converter.SinkTypeConverter; +import io.odpf.firehose.config.enums.InputSchemaType; import io.odpf.firehose.config.enums.SinkType; import io.odpf.stencil.cache.SchemaRefreshStrategy; @@ -15,18 +17,6 @@ public interface AppConfig extends Config { - @Key("METRIC_STATSD_HOST") - @DefaultValue("localhost") - String getMetricStatsDHost(); - - @Key("METRIC_STATSD_PORT") - @DefaultValue("8125") - Integer getMetricStatsDPort(); - - @Key("METRIC_STATSD_TAGS") - @DefaultValue("") - String getMetricStatsDTags(); - @Key("SINK_TYPE") @ConverterClass(SinkTypeConverter.class) SinkType getSinkType(); @@ -80,6 +70,11 @@ public interface AppConfig extends Config { @Key("INPUT_SCHEMA_PROTO_CLASS") String getInputSchemaProtoClass(); + @Key("INPUT_SCHEMA_DATA_TYPE") + @DefaultValue("PROTOBUF") + @ConverterClass(InputSchemaTypeConverter.class) + InputSchemaType 
getInputSchemaType(); + @Key("INPUT_SCHEMA_PROTO_TO_COLUMN_MAPPING") @ConverterClass(ProtoIndexToFieldMapConverter.class) Properties getInputSchemaProtoToColumnMapping(); diff --git a/src/main/java/io/odpf/firehose/config/BigQuerySinkConfig.java b/src/main/java/io/odpf/firehose/config/BigQuerySinkConfig.java deleted file mode 100644 index ef68a531e..000000000 --- a/src/main/java/io/odpf/firehose/config/BigQuerySinkConfig.java +++ /dev/null @@ -1,61 +0,0 @@ -package io.odpf.firehose.config; - -import io.odpf.firehose.config.converter.LabelMapConverter; - -import java.util.Map; - -public interface BigQuerySinkConfig extends AppConfig { - - @Key("SINK_BIGQUERY_GOOGLE_CLOUD_PROJECT_ID") - String getGCloudProjectID(); - - @Key("SINK_BIGQUERY_TABLE_NAME") - String getTableName(); - - @Key("SINK_BIGQUERY_DATASET_LABELS") - @Separator(LabelMapConverter.ELEMENT_SEPARATOR) - @ConverterClass(LabelMapConverter.class) - Map getDatasetLabels(); - - @Key("SINK_BIGQUERY_TABLE_LABELS") - @Separator(LabelMapConverter.ELEMENT_SEPARATOR) - @ConverterClass(LabelMapConverter.class) - Map getTableLabels(); - - @Key("SINK_BIGQUERY_DATASET_NAME") - String getDatasetName(); - - @Key("SINK_BIGQUERY_CREDENTIAL_PATH") - String getBigQueryCredentialPath(); - - @Key("SINK_BIGQUERY_TABLE_PARTITIONING_ENABLE") - @DefaultValue("false") - Boolean isTablePartitioningEnabled(); - - @Key("SINK_BIGQUERY_TABLE_PARTITION_KEY") - String getTablePartitionKey(); - - @Key("SINK_BIGQUERY_ROW_INSERT_ID_ENABLE") - @DefaultValue("true") - Boolean isRowInsertIdEnabled(); - - @Key("SINK_BIGQUERY_CLIENT_READ_TIMEOUT_MS") - @DefaultValue("-1") - int getBqClientReadTimeoutMS(); - - @Key("SINK_BIGQUERY_CLIENT_CONNECT_TIMEOUT_MS") - @DefaultValue("-1") - int getBqClientConnectTimeoutMS(); - - @Key("SINK_BIGQUERY_TABLE_PARTITION_EXPIRY_MS") - @DefaultValue("-1") - Long getBigQueryTablePartitionExpiryMS(); - - @Key("SINK_BIGQUERY_DATASET_LOCATION") - @DefaultValue("asia-southeast1") - String getBigQueryDatasetLocation(); - - @DefaultValue("") - @Key("SINK_BIGQUERY_METADATA_NAMESPACE") - String getBqMetadataNamespace(); -} diff --git a/src/main/java/io/odpf/firehose/config/converter/InputSchemaTypeConverter.java b/src/main/java/io/odpf/firehose/config/converter/InputSchemaTypeConverter.java new file mode 100644 index 000000000..8341a6f3f --- /dev/null +++ b/src/main/java/io/odpf/firehose/config/converter/InputSchemaTypeConverter.java @@ -0,0 +1,13 @@ +package io.odpf.firehose.config.converter; + +import io.odpf.firehose.config.enums.InputSchemaType; +import org.aeonbits.owner.Converter; + +import java.lang.reflect.Method; + +public class InputSchemaTypeConverter implements Converter { + @Override + public InputSchemaType convert(Method method, String input) { + return InputSchemaType.valueOf(input.trim().toUpperCase()); + } +} diff --git a/src/main/java/io/odpf/firehose/config/enums/InputSchemaType.java b/src/main/java/io/odpf/firehose/config/enums/InputSchemaType.java new file mode 100644 index 000000000..49fadc084 --- /dev/null +++ b/src/main/java/io/odpf/firehose/config/enums/InputSchemaType.java @@ -0,0 +1,6 @@ +package io.odpf.firehose.config.enums; + +public enum InputSchemaType { + PROTOBUF, + JSON +} diff --git a/src/main/java/io/odpf/firehose/consumer/FirehoseConsumerFactory.java b/src/main/java/io/odpf/firehose/consumer/FirehoseConsumerFactory.java index 1eade3ec8..d17e04616 100644 --- a/src/main/java/io/odpf/firehose/consumer/FirehoseConsumerFactory.java +++ b/src/main/java/io/odpf/firehose/consumer/FirehoseConsumerFactory.java @@ 
-161,8 +161,8 @@ private Sink createSink(Tracer tracer, SinkFactory sinkFactory) { Sink baseSink = sinkFactory.getSink(); Sink sinkWithFailHandler = new SinkWithFailHandler(baseSink, errorHandler); Sink sinkWithRetry = withRetry(sinkWithFailHandler, errorHandler); - Sink sinWithDLQ = withDlq(sinkWithRetry, tracer, errorHandler); - return new SinkFinal(sinWithDLQ, new FirehoseInstrumentation(statsDReporter, SinkFinal.class)); + Sink sinkWithDLQ = withDlq(sinkWithRetry, tracer, errorHandler); + return new SinkFinal(sinkWithDLQ, new FirehoseInstrumentation(statsDReporter, SinkFinal.class)); } public Sink withDlq(Sink sink, Tracer tracer, ErrorHandler errorHandler) { diff --git a/src/main/java/io/odpf/firehose/sink/SinkFactory.java b/src/main/java/io/odpf/firehose/sink/SinkFactory.java index fa68edda8..00ee24e26 100644 --- a/src/main/java/io/odpf/firehose/sink/SinkFactory.java +++ b/src/main/java/io/odpf/firehose/sink/SinkFactory.java @@ -2,6 +2,7 @@ import io.odpf.depot.bigquery.BigQuerySink; import io.odpf.depot.bigquery.BigQuerySinkFactory; +import io.odpf.depot.config.BigQuerySinkConfig; import io.odpf.depot.log.LogSink; import io.odpf.depot.log.LogSinkFactory; import io.odpf.depot.metrics.StatsDReporter; @@ -21,6 +22,7 @@ import io.odpf.firehose.sink.prometheus.PromSinkFactory; import io.odpf.firehose.sink.redis.RedisSinkFactory; import io.odpf.stencil.client.StencilClient; +import org.aeonbits.owner.ConfigFactory; import java.util.Map; @@ -67,7 +69,10 @@ public void init() { return; case BIGQUERY: BigquerySinkUtils.addMetadataColumns(config); - bigQuerySinkFactory = new BigQuerySinkFactory(config, statsDReporter, BigquerySinkUtils.getRowIDCreator()); + bigQuerySinkFactory = new BigQuerySinkFactory( + ConfigFactory.create(BigQuerySinkConfig.class, config), + statsDReporter, + BigquerySinkUtils.getRowIDCreator()); bigQuerySinkFactory.init(); return; default: diff --git a/src/main/java/io/odpf/firehose/sink/SinkFactoryUtils.java b/src/main/java/io/odpf/firehose/sink/SinkFactoryUtils.java index f5a80ce7c..2c2b80925 100644 --- a/src/main/java/io/odpf/firehose/sink/SinkFactoryUtils.java +++ b/src/main/java/io/odpf/firehose/sink/SinkFactoryUtils.java @@ -8,8 +8,9 @@ public class SinkFactoryUtils { protected static Map addAdditionalConfigsForSinkConnectors(Map env) { Map finalConfig = new HashMap<>(env); - finalConfig.put("SINK_CONNECTOR_SCHEMA_MESSAGE_CLASS", env.getOrDefault("INPUT_SCHEMA_PROTO_CLASS", "")); - finalConfig.put("SINK_CONNECTOR_SCHEMA_KEY_CLASS", env.getOrDefault("INPUT_SCHEMA_PROTO_CLASS", "")); + finalConfig.put("SINK_CONNECTOR_SCHEMA_PROTO_MESSAGE_CLASS", env.getOrDefault("INPUT_SCHEMA_PROTO_CLASS", "")); + finalConfig.put("SINK_CONNECTOR_SCHEMA_PROTO_KEY_CLASS", env.getOrDefault("INPUT_SCHEMA_PROTO_CLASS", "")); + finalConfig.put("SINK_CONNECTOR_SCHEMA_DATA_TYPE", env.getOrDefault("INPUT_SCHEMA_DATA_TYPE", "protobuf")); finalConfig.put("SINK_METRICS_APPLICATION_PREFIX", "firehose_"); finalConfig.put("SINK_CONNECTOR_SCHEMA_PROTO_ALLOW_UNKNOWN_FIELDS_ENABLE", env.getOrDefault("INPUT_SCHEMA_PROTO_ALLOW_UNKNOWN_FIELDS_ENABLE", "false")); finalConfig.put("SINK_CONNECTOR_SCHEMA_MESSAGE_MODE", diff --git a/src/main/java/io/odpf/firehose/sink/dlq/log/LogDlqWriter.java b/src/main/java/io/odpf/firehose/sink/dlq/log/LogDlqWriter.java index 595c71725..3923b5ad6 100644 --- a/src/main/java/io/odpf/firehose/sink/dlq/log/LogDlqWriter.java +++ b/src/main/java/io/odpf/firehose/sink/dlq/log/LogDlqWriter.java @@ -20,8 +20,8 @@ public LogDlqWriter(FirehoseInstrumentation firehoseInstrumentation) 
{ @Override public List write(List messages) throws IOException { for (Message message : messages) { - String key = new String(message.getLogKey()); - String value = new String(message.getLogMessage()); + String key = message.getLogKey() == null ? "" : new String(message.getLogKey()); + String value = message.getLogMessage() == null ? "" : new String(message.getLogMessage()); String error = ""; ErrorInfo errorInfo = message.getErrorInfo(); diff --git a/src/main/java/io/odpf/firehose/sinkdecorator/SinkWithRetry.java b/src/main/java/io/odpf/firehose/sinkdecorator/SinkWithRetry.java index 60332800c..77a37aa70 100644 --- a/src/main/java/io/odpf/firehose/sinkdecorator/SinkWithRetry.java +++ b/src/main/java/io/odpf/firehose/sinkdecorator/SinkWithRetry.java @@ -16,6 +16,7 @@ import java.util.LinkedList; import java.util.List; import java.util.Map; +import java.util.stream.Collectors; import static io.odpf.firehose.metrics.Metrics.RETRY_MESSAGES_TOTAL; import static io.odpf.firehose.metrics.Metrics.RETRY_ATTEMPTS_TOTAL; @@ -65,11 +66,27 @@ public List pushMessage(List inputMessages) throws IOException private void logDebug(List messageList) throws IOException { if (firehoseInstrumentation.isDebugEnabled()) { - List serializedBody = new ArrayList<>(); - for (Message message : messageList) { - serializedBody.add(parser.parse(message)); + switch (appConfig.getInputSchemaType()) { + case PROTOBUF: + List serializedBody = new ArrayList<>(); + for (Message message : messageList) { + serializedBody.add(parser.parse(message)); + } + firehoseInstrumentation.logDebug("Retry failed messages: \n{}", serializedBody.toString()); + break; + case JSON: + List messages = messageList.stream().map(m -> { + if (appConfig.getKafkaRecordParserMode().equals("key")) { + return new String(m.getLogKey()); + } else { + return new String(m.getLogMessage()); + } + }).collect(Collectors.toList()); + firehoseInstrumentation.logDebug("Retry failed messages: \n{}", messages.toString()); + break; + default: + throw new IllegalArgumentException("Unexpected value: " + appConfig.getInputSchemaType()); } - firehoseInstrumentation.logDebug("Retry failed messages: \n{}", serializedBody.toString()); } } diff --git a/src/test/java/io/odpf/firehose/config/converter/InputSchemaTypeConverterTest.java b/src/test/java/io/odpf/firehose/config/converter/InputSchemaTypeConverterTest.java new file mode 100644 index 000000000..2eb31b5e1 --- /dev/null +++ b/src/test/java/io/odpf/firehose/config/converter/InputSchemaTypeConverterTest.java @@ -0,0 +1,24 @@ +package io.odpf.firehose.config.converter; + +import io.odpf.firehose.config.enums.InputSchemaType; +import org.junit.Assert; +import org.junit.Test; + +public class InputSchemaTypeConverterTest { + + @Test + public void shouldConvertSchemaType() { + InputSchemaTypeConverter converter = new InputSchemaTypeConverter(); + InputSchemaType schemaType = converter.convert(null, "PROTOBUF"); + Assert.assertEquals(InputSchemaType.PROTOBUF, schemaType); + schemaType = converter.convert(null, "JSON"); + Assert.assertEquals(InputSchemaType.JSON, schemaType); + } + + @Test + public void shouldConvertSchemaTypeWithLowerCase() { + InputSchemaTypeConverter converter = new InputSchemaTypeConverter(); + InputSchemaType schemaType = converter.convert(null, " json "); + Assert.assertEquals(InputSchemaType.JSON, schemaType); + } +} diff --git a/src/test/java/io/odpf/firehose/sink/SinkFactoryUtilsTest.java b/src/test/java/io/odpf/firehose/sink/SinkFactoryUtilsTest.java index 3cf07d78f..d573bd992 100644 --- 
a/src/test/java/io/odpf/firehose/sink/SinkFactoryUtilsTest.java +++ b/src/test/java/io/odpf/firehose/sink/SinkFactoryUtilsTest.java @@ -15,8 +15,8 @@ public void shouldAddSinkConnectorConfigs() { put("INPUT_SCHEMA_PROTO_ALLOW_UNKNOWN_FIELDS_ENABLE", "true"); }}; Map configs = SinkFactoryUtils.addAdditionalConfigsForSinkConnectors(env); - Assert.assertEquals("com.test.SomeProtoClass", configs.get("SINK_CONNECTOR_SCHEMA_MESSAGE_CLASS")); - Assert.assertEquals("com.test.SomeProtoClass", configs.get("SINK_CONNECTOR_SCHEMA_KEY_CLASS")); + Assert.assertEquals("com.test.SomeProtoClass", configs.get("SINK_CONNECTOR_SCHEMA_PROTO_MESSAGE_CLASS")); + Assert.assertEquals("com.test.SomeProtoClass", configs.get("SINK_CONNECTOR_SCHEMA_PROTO_KEY_CLASS")); Assert.assertEquals("firehose_", configs.get("SINK_METRICS_APPLICATION_PREFIX")); Assert.assertEquals("true", configs.get("SINK_CONNECTOR_SCHEMA_PROTO_ALLOW_UNKNOWN_FIELDS_ENABLE")); Assert.assertEquals("LOG_MESSAGE", configs.get("SINK_CONNECTOR_SCHEMA_MESSAGE_MODE")); diff --git a/src/test/java/io/odpf/firehose/sink/dlq/LogDlqWriterTest.java b/src/test/java/io/odpf/firehose/sink/dlq/LogDlqWriterTest.java new file mode 100644 index 000000000..1287f865d --- /dev/null +++ b/src/test/java/io/odpf/firehose/sink/dlq/LogDlqWriterTest.java @@ -0,0 +1,108 @@ +package io.odpf.firehose.sink.dlq; + +import io.odpf.depot.error.ErrorInfo; +import io.odpf.depot.error.ErrorType; +import io.odpf.firehose.message.Message; +import io.odpf.firehose.metrics.FirehoseInstrumentation; +import io.odpf.firehose.sink.dlq.log.LogDlqWriter; +import org.apache.commons.lang3.exception.ExceptionUtils; +import org.junit.Assert; +import org.junit.Before; +import org.junit.Test; +import org.junit.runner.RunWith; +import org.mockito.Mock; +import org.mockito.Mockito; +import org.mockito.junit.MockitoJUnitRunner; + +import java.io.IOException; +import java.time.Instant; +import java.util.Collections; +import java.util.List; + +@RunWith(MockitoJUnitRunner.class) +public class LogDlqWriterTest { + + @Mock + private FirehoseInstrumentation firehoseInstrumentation; + + private LogDlqWriter logDlqWriter; + + @Before + public void setUp() throws Exception { + logDlqWriter = new LogDlqWriter(firehoseInstrumentation); + } + + @Test + public void shouldWriteMessagesToLog() throws IOException { + long timestamp = Instant.parse("2020-01-01T00:00:00Z").toEpochMilli(); + Message message = new Message("123".getBytes(), "abc".getBytes(), "booking", 1, 1, null, timestamp, timestamp, new ErrorInfo(new IOException("test"), ErrorType.DESERIALIZATION_ERROR)); + + String key = new String(message.getLogKey()); + String value = new String(message.getLogMessage()); + ErrorInfo errorInfo = message.getErrorInfo(); + String error = ExceptionUtils.getStackTrace(errorInfo.getException()); + + List messages = Collections.singletonList(message); + Assert.assertEquals(0, logDlqWriter.write(messages).size()); + + Mockito.verify(firehoseInstrumentation, Mockito.times(1)).logInfo("key: {}\nvalue: {}\nerror: {}", key, value, error); + } + + @Test + public void shouldWriteMessagesToLogWhenKeyIsNull() throws IOException { + long timestamp = Instant.parse("2020-01-01T00:00:00Z").toEpochMilli(); + Message message = new Message(null, "abc".getBytes(), "booking", 1, 1, null, timestamp, timestamp, new ErrorInfo(new IOException("test"), ErrorType.DESERIALIZATION_ERROR)); + + String value = new String(message.getLogMessage()); + ErrorInfo errorInfo = message.getErrorInfo(); + String error = 
ExceptionUtils.getStackTrace(errorInfo.getException()); + + List messages = Collections.singletonList(message); + Assert.assertEquals(0, logDlqWriter.write(messages).size()); + + Mockito.verify(firehoseInstrumentation, Mockito.times(1)).logInfo("key: {}\nvalue: {}\nerror: {}", "", value, error); + } + + @Test + public void shouldWriteMessagesToLogWhenValueIsNull() throws IOException { + long timestamp = Instant.parse("2020-01-01T00:00:00Z").toEpochMilli(); + Message message = new Message("123".getBytes(), null, "booking", 1, 1, null, timestamp, timestamp, new ErrorInfo(new IOException("test"), ErrorType.DESERIALIZATION_ERROR)); + + String key = new String(message.getLogKey()); + ErrorInfo errorInfo = message.getErrorInfo(); + String error = ExceptionUtils.getStackTrace(errorInfo.getException()); + + List messages = Collections.singletonList(message); + Assert.assertEquals(0, logDlqWriter.write(messages).size()); + + Mockito.verify(firehoseInstrumentation, Mockito.times(1)).logInfo("key: {}\nvalue: {}\nerror: {}", key, "", error); + } + + @Test + public void shouldWriteMessagesToLogWhenErrorInfoIsNull() throws IOException { + long timestamp = Instant.parse("2020-01-01T00:00:00Z").toEpochMilli(); + Message message = new Message("123".getBytes(), "abc".getBytes(), "booking", 1, 1, null, timestamp, timestamp, null); + + String key = new String(message.getLogKey()); + String value = new String(message.getLogMessage()); + + List messages = Collections.singletonList(message); + Assert.assertEquals(0, logDlqWriter.write(messages).size()); + + Mockito.verify(firehoseInstrumentation, Mockito.times(1)).logInfo("key: {}\nvalue: {}\nerror: {}", key, value, ""); + } + + @Test + public void shouldWriteMessagesToLogWhenErrorInfoExceptionIsNull() throws IOException { + long timestamp = Instant.parse("2020-01-01T00:00:00Z").toEpochMilli(); + Message message = new Message("123".getBytes(), "abc".getBytes(), "booking", 1, 1, null, timestamp, timestamp, new ErrorInfo(null, ErrorType.DESERIALIZATION_ERROR)); + + String key = new String(message.getLogKey()); + String value = new String(message.getLogMessage()); + + List messages = Collections.singletonList(message); + Assert.assertEquals(0, logDlqWriter.write(messages).size()); + + Mockito.verify(firehoseInstrumentation, Mockito.times(1)).logInfo("key: {}\nvalue: {}\nerror: {}", key, value, ""); + } +} diff --git a/src/test/java/io/odpf/firehose/sinkdecorator/SinkWithRetryTest.java b/src/test/java/io/odpf/firehose/sinkdecorator/SinkWithRetryTest.java index 2da50b544..f36f79999 100644 --- a/src/test/java/io/odpf/firehose/sinkdecorator/SinkWithRetryTest.java +++ b/src/test/java/io/odpf/firehose/sinkdecorator/SinkWithRetryTest.java @@ -4,6 +4,7 @@ import io.odpf.depot.error.ErrorType; import io.odpf.firehose.config.AppConfig; import io.odpf.firehose.config.ErrorConfig; +import io.odpf.firehose.config.enums.InputSchemaType; import io.odpf.firehose.message.Message; import io.odpf.firehose.error.ErrorHandler; import io.odpf.firehose.exception.DeserializerException; @@ -128,6 +129,7 @@ public void shouldRetryUntilSuccess() throws IOException, DeserializerException @Test public void shouldLogRetriesMessages() throws IOException, DeserializerException { when(appConfig.getRetryMaxAttempts()).thenReturn(10); + when(appConfig.getInputSchemaType()).thenReturn(InputSchemaType.PROTOBUF); ArrayList messages = new ArrayList<>(); messages.add(message); messages.add(message); @@ -149,6 +151,33 @@ public void shouldLogRetriesMessages() throws IOException, DeserializerException 
verify(firehoseInstrumentation, times(5)).logDebug("Retry failed messages: \n{}", "[null, null]"); } + @Test + public void shouldLogRetriesMessagesForJsonInput() throws IOException, DeserializerException { + when(appConfig.getRetryMaxAttempts()).thenReturn(10); + when(appConfig.getInputSchemaType()).thenReturn(InputSchemaType.JSON); + when(appConfig.getKafkaRecordParserMode()).thenReturn("message"); + when(message.getLogMessage()).thenReturn("testing message".getBytes()); + ArrayList messages = new ArrayList<>(); + messages.add(message); + messages.add(message); + when(message.getErrorInfo()).thenReturn(new ErrorInfo(null, ErrorType.DESERIALIZATION_ERROR)); + when(firehoseInstrumentation.isDebugEnabled()).thenReturn(true); + when(sinkDecorator.pushMessage(anyList())).thenReturn(messages).thenReturn(messages).thenReturn(messages) + .thenReturn(messages).thenReturn(messages).thenReturn(new ArrayList<>()); + SinkWithRetry sinkWithRetry = new SinkWithRetry(sinkDecorator, backOffProvider, firehoseInstrumentation, appConfig, parser, errorHandler); + + List messageList = sinkWithRetry.pushMessage(Collections.singletonList(message)); + assertTrue(messageList.isEmpty()); + verify(firehoseInstrumentation, times(1)).logInfo("Maximum retry attempts: {}", 10); + verify(firehoseInstrumentation, times(5)).incrementCounter("firehose_retry_attempts_total"); + verify(firehoseInstrumentation, times(1)).logInfo("Retrying messages attempt count: {}, Number of messages: {}", 1, 2); + verify(firehoseInstrumentation, times(1)).logInfo("Retrying messages attempt count: {}, Number of messages: {}", 2, 2); + verify(firehoseInstrumentation, times(1)).logInfo("Retrying messages attempt count: {}, Number of messages: {}", 3, 2); + verify(firehoseInstrumentation, times(1)).logInfo("Retrying messages attempt count: {}, Number of messages: {}", 4, 2); + verify(firehoseInstrumentation, times(1)).logInfo("Retrying messages attempt count: {}, Number of messages: {}", 5, 2); + verify(firehoseInstrumentation, times(5)).logDebug("Retry failed messages: \n{}", "[testing message, testing message]"); + } + @Test public void shouldAddInstrumentationForRetry() throws Exception { when(appConfig.getRetryMaxAttempts()).thenReturn(3); From cb147ac8b7424a243026939746e866e229def548 Mon Sep 17 00:00:00 2001 From: kevinbheda Date: Fri, 5 Aug 2022 11:20:19 +0530 Subject: [PATCH 7/9] docs: update docs for bigquery sink json support (#183) --- docs/docs/concepts/architecture.md | 16 +++++++--- docs/docs/contribute/development.md | 3 +- docs/docs/guides/create_firehose.md | 7 +++-- docs/docs/introduction.md | 5 +++ docs/docs/reference/core-faqs.md | 4 +-- docs/docs/reference/faq.md | 5 +-- docs/docs/sinks/bigquery-sink.md | 48 +++++++++++++++++++++++++++-- 7 files changed, 73 insertions(+), 15 deletions(-) diff --git a/docs/docs/concepts/architecture.md b/docs/docs/concepts/architecture.md index ead996e16..8b42bda82 100644 --- a/docs/docs/concepts/architecture.md +++ b/docs/docs/concepts/architecture.md @@ -71,14 +71,20 @@ The final state of message can be any one of the followings after it is consumed One can monitor via plotting the metrics related to messages. ### Schema Handling +- In case `INPUT_SCHEMA_DATA_TYPE` is set to `protobuf` + - Protocol buffers are Google's language-neutral, platform-neutral, extensible mechanism for serializing structured data. Data streams on Kafka topics are bound to a protobuf schema. + - Firehose deserializes the data consumed from the topics using the Protobuf descriptors generated out of the artifacts.
The artifactory is an HTTP interface that Firehose uses to deserialize. + - The schema handling ie., find the mapped schema for the topic, downloading the descriptors, and dynamically being notified of/updating with the latest schema is abstracted through the Stencil library. -- Protocol buffers are Google's language-neutral, platform-neutral, extensible mechanism for serializing structured data. Data streams on Kafka topics are bound to a protobuf schema. -- Firehose deserializes the data consumed from the topics using the Protobuf descriptors generated out of the artifacts. The artifactory is an HTTP interface that Firehose uses to deserialize. -- The schema handling ie., find the mapped schema for the topic, downloading the descriptors, and dynamically being notified of/updating with the latest schema is abstracted through the Stencil library. + The Stencil is a proprietary library that provides an abstraction layer, for schema handling. - The Stencil is a proprietary library that provides an abstraction layer, for schema handling. + Schema Caching, dynamic schema updates, etc. are features of the stencil client library. - Schema Caching, dynamic schema updates, etc. are features of the stencil client library. +- In case `INPUT_SCHEMA_DATA_TYPE` is set to `json` + - Currently this config is only supported in the Bigquery sink. + - For json, the bigquery sink infers the schema dynamically from the incoming data; in the future we plan to provide json schema support via stencil. + + ## Firehose Integration diff --git a/docs/docs/contribute/development.md b/docs/docs/contribute/development.md index 063cc7d66..9f561c47c 100644 --- a/docs/docs/contribute/development.md +++ b/docs/docs/contribute/development.md @@ -37,7 +37,7 @@ Configuration parameter variables of each sink can be found in the [Configuratio ### Schema Registry -Firehose uses Stencil Server as its Schema Registry for hosting Protobuf descriptors. The environment variable `SCHEMA_REGISTRY_STENCIL_ENABLE` must be set to `true` . Stencil server URL must be specified in the variable `SCHEMA_REGISTRY_STENCIL_URLS` . The Proto Descriptor Set file of the Kafka messages must be uploaded to the Stencil server. +When `INPUT_SCHEMA_DATA_TYPE` is set to `protobuf`, firehose uses Stencil Server as its Schema Registry for hosting Protobuf descriptors. The environment variable `SCHEMA_REGISTRY_STENCIL_ENABLE` must be set to `true` . Stencil server URL must be specified in the variable `SCHEMA_REGISTRY_STENCIL_URLS` . The Proto Descriptor Set file of the Kafka messages must be uploaded to the Stencil server. Refer [this guide](https://github.com/odpf/stencil/tree/master/server#readme) on how to set up and configure the Stencil server, and how to generate and upload Proto descriptor set file to the server. @@ -71,6 +71,7 @@ Set the generic variables in the local.properties file. ```text KAFKA_RECORD_PARSER_MODE = message SINK_TYPE = log +INPUT_SCHEMA_DATA_TYPE=protobuf INPUT_SCHEMA_PROTO_CLASS = io.odpf.firehose.consumer.TestMessage ``` Set the variables which specify the kafka server, topic name, and group-id of the kafka consumer - the standard values are used here.
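The local.properties snippet above covers the default protobuf/log case. As a companion illustration for the JSON-to-BigQuery path introduced by this patch series, a minimal sketch might look like the following; the `SOURCE_KAFKA_*` values are placeholders, and the `SINK_BIGQUERY_*` keys are assumptions taken from the depot configuration reference linked in these docs rather than values verified against this patch:

```text
# Generic consumer settings (placeholder values)
SOURCE_KAFKA_BROKERS=localhost:9092
SOURCE_KAFKA_TOPIC=test-topic
SOURCE_KAFKA_CONSUMER_GROUP_ID=sample-group-id
KAFKA_RECORD_PARSER_MODE=message

# Sink selection and the input data type added by this patch series
SINK_TYPE=bigquery
INPUT_SCHEMA_DATA_TYPE=json

# BigQuery settings resolved by depot (key names assumed from the depot docs)
SINK_BIGQUERY_GOOGLE_CLOUD_PROJECT_ID=my-project
SINK_BIGQUERY_DATASET_NAME=my_dataset
SINK_BIGQUERY_TABLE_NAME=my_table
SINK_BIGQUERY_CREDENTIAL_PATH=/path/to/service-account.json
# For JSON input, a default timestamp column helps with partitioned tables
SINK_BIGQUERY_DEFAULT_COLUMNS=event_timestamp=timestamp
SINK_BIGQUERY_ADD_EVENT_TIMESTAMP_ENABLE=true
```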
diff --git a/docs/docs/guides/create_firehose.md b/docs/docs/guides/create_firehose.md index 39825df3f..d2855d9d3 100644 --- a/docs/docs/guides/create_firehose.md +++ b/docs/docs/guides/create_firehose.md @@ -14,6 +14,7 @@ SOURCE_KAFKA_TOPIC=test-topic KAFKA_RECOED_CONSUMER_GROUP_ID=sample-group-id KAFKA_RECORD_PARSER_MODE=message SINK_TYPE=log +INPUT_SCHEMA_DATA_TYPE=protobuf INPUT_SCHEMA_PROTO_CLASS=com.tests.TestMessage ``` @@ -129,8 +130,10 @@ _**Note:**_ [_**DATABASE**_](../sinks/influxdb-sink.md#sink_influx_db_name) _**a ## Create a Bigquery sink - it requires the following [variables](../sinks/bigquery-sink.md) to be set. -- This sink will generate bigquery schema from protobuf message schema and update bigquery table with the latest generated schema. -- The protobuf message of a `google.protobuf.Timestamp` field might be needed when table partitioning is enabled. +- For INPUT_SCHEMA_DATA_TYPE = protobuf, this sink will generate bigquery schema from protobuf message schema and update bigquery table with the latest generated schema. + - The protobuf message of a `google.protobuf.Timestamp` field might be needed when table partitioning is enabled. +- For INPUT_SCHEMA_DATA_TYPE = json, this sink will generate the bigquery schema by inferring it from the incoming json. In future we will also add support for json schemas coming from stencil. + - A timestamp column is needed in case of a partitioned table. It can be generated at the time of ingestion by setting the config. Please refer to the config `SINK_BIGQUERY_ADD_EVENT_TIMESTAMP_ENABLE` in the [depot bigquery sink config section](https://github.com/odpf/depot/blob/main/docs/reference/configuration/bigquery-sink.md#sink_bigquery_add_event_timestamp_enable) - Google cloud credential with some bigquery permission is required to run this sink. If you'd like to connect to a sink which is not yet supported, you can create a new sink by following the [contribution guidelines](../contribute/contribution.md) diff --git a/docs/docs/introduction.md b/docs/docs/introduction.md index 52adc8ade..ee3de4026 100644 --- a/docs/docs/introduction.md +++ b/docs/docs/introduction.md @@ -19,6 +19,11 @@ Discover why users choose Firehose as their main Kafka Consumer - **Runtime** Firehose can run inside containers or VMs in a fully managed runtime environment like Kubernetes. - **Metrics** Always know what’s going on with your deployment with built-in monitoring of throughput, response times, errors, and more. +## Supported incoming data types from Kafka +- [Protobuf](https://developers.google.com/protocol-buffers) +- [JSON](https://www.json.org/json-en.html) + - Support is currently limited to the bigquery, elastic and mongo sinks. In future, support for other sinks will be added. + ## Supported Sinks: Following sinks are supported in the Firehose diff --git a/docs/docs/reference/core-faqs.md b/docs/docs/reference/core-faqs.md index cff2b7c4c..96c044560 100644 --- a/docs/docs/reference/core-faqs.md +++ b/docs/docs/reference/core-faqs.md @@ -49,7 +49,7 @@ Firehose provides various Kafka client configurations. Refer [Generic Configurat ## What all data formats are supported ? -Elasticsearch and MongoDB sink support both JSON and Protobuf as the input schema. For other sinks, we currently support only Protobuf. Support for JSON and Avro is planned and incorporated in our roadmap. Please refer to our Roadmap section for more details. +Elasticsearch, Bigquery and MongoDB sink support both JSON and Protobuf as the input schema. For other sinks, we currently support only Protobuf.
Support for JSON and Avro is planned and incorporated in our roadmap. Please refer to our Roadmap section for more details. Protocol buffers are Google's language-neutral, platform-neutral, extensible mechanism for serializing structured data. Data streams on Kafka topics are bound to a Protobuf schema. @@ -146,7 +146,7 @@ No, all fields from the input key/message will be sent by Firehose to the Sink. Protocol buffers are Google's language-neutral, platform-neutral, extensible mechanism for serializing structured data. Data streams on Kafka topics are bound to a Protobuf schema. Protobuf is much more lightweight that other schema formats like JSON, since it encodes the keys in the message to integers. -Elasticsearch and MongoDB sink support both JSON and Protobuf as the input schema. +Elasticsearch, Bigquery and MongoDB sink support both JSON and Protobuf as the input schema. For other sinks, we currently support only Protobuf. Support for JSON and Avro is planned and incorporated in our roadmap. Please refer to our Roadmap section for more details. diff --git a/docs/docs/reference/faq.md b/docs/docs/reference/faq.md index 5b1824b3b..9b8911b26 100644 --- a/docs/docs/reference/faq.md +++ b/docs/docs/reference/faq.md @@ -771,15 +771,16 @@ section. #### What all data formats are supported? -ElasticSearch and MongoDB sink support both JSON and Protobuf as the input schema. For other sinks, we currently support +ElasticSearch, Bigquery and MongoDB sink support both JSON and Protobuf as the input schema. For other sinks, we currently support only Protobuf. Support for JSON and Avro is planned and incorporated in the roadmap. Protocol buffers are Google's language-neutral, platform-neutral, extensible mechanism for serialising structured data. -Data streams on Kafka topics are bound to a Protobuf schema. Follow the instructions +When `INPUT_SCHEMA_DATA_TYPE=protobuf`, data streams on Kafka topics are bound to a Protobuf schema. Follow the instructions in [this article](https://developers.google.com/protocol-buffers/docs/javatutorial) on how to create, compile and serialize a Protobuf object to send it to a binary OutputStream. Refer [this guide](https://developers.google.com/protocol-buffers/docs/proto3) for detailed Protobuf syntax and rules to create a `.proto` file. +When `INPUT_SCHEMA_DATA_TYPE=json`, data streams on Kafka topics are expected to contain valid JSON messages. #### Can we select particular fields from the input message? diff --git a/docs/docs/sinks/bigquery-sink.md b/docs/docs/sinks/bigquery-sink.md index 1f8989dcf..1d5f32280 100644 --- a/docs/docs/sinks/bigquery-sink.md +++ b/docs/docs/sinks/bigquery-sink.md @@ -1,6 +1,48 @@ # BigQuery -A Bigquery sink Firehose \(`SINK_TYPE`=`bigquery`\) requires env variables to be set along with Generic ones and -env variables in depot repository. The Firehose sink uses bigquery implementation available [depot](https://github.com/odpf/depot) repository. +The Bigquery Sink has several responsibilities: first, creating the bigquery dataset and table when they do not exist; second, updating the bigquery table schema based on the latest schema defined in stencil or inferred from incoming data; and third, translating incoming messages into bigquery records and inserting them into bigquery tables. +The sink uses the Bigquery [Streaming API](https://cloud.google.com/bigquery/streaming-data-into-bigquery) to insert records into bigquery tables.
For more info on the sink, refer to the [Depot Bigquery sink documentation](https://github.com/odpf/depot/blob/main/docs/sinks/bigquery.md) + +## Asynchronous consumer mode + +The Bigquery Streaming API limits the size of the payload sent for each insert operation. This limitation reduces the number of messages that can be inserted in a single request when the message size is big. +This reduces the throughput of the bigquery sink. To increase the throughput, firehose provides an asynchronous kafka consumer mode. +In asynchronous mode the sink operation is executed asynchronously, so multiple sink tasks can be scheduled and run concurrently. +Throughput can be increased by increasing the size of the sink pool. + +## At Least Once Guarantee + +Because of the asynchronous consumer mode and the possibility of retries on the insert operation, there is no guarantee of the order in which messages are successfully sent to the sink. +The same applies to committing offsets: the offsets of processed messages do not complete in order. +Firehose collects all the offsets, sorts them, and only commits the latest contiguous offset. +This ensures that offsets are committed only after their messages have been successfully processed, even when some messages are reprocessed by the retry handler or when an insert operation takes a long time. + +## Configurations +For the Bigquery sink in Firehose, first set \(`SINK_TYPE`=`bigquery`\). There are some generic configs which are common across different sink types and also need to be set, for example the kafka consumer configs; the generic ones are mentioned in [generic.md](../advance/generic.md). Bigquery sink specific configs are mentioned in the depot [Depot-configuration/bigquery-sink.md section](https://github.com/odpf/depot/blob/main/docs/reference/configuration/bigquery-sink.md) + + +## Bigquery table schema update +Refer to [Depot-bigquery.md#bigquery-table-schema-update section](https://github.com/odpf/depot/blob/main/docs/sinks/bigquery.md#bigquery-table-schema-update) + +## Protobuf and BigQuery table type mapping +For type conversion between protobuf and bigquery types, please refer to +[Depot-bigquery.md#protobuf-bigquery-table-type-mapping section](https://github.com/odpf/depot/blob/main/docs/sinks/bigquery.md#protobuf---bigquery-table-type-mapping) + +## Partitioning +Bigquery Sink supports creation of tables with partition configuration. +For more information refer to [Depot-bigquery.md#partitioning section](https://github.com/odpf/depot/blob/main/docs/sinks/bigquery.md#partitioning) + +## Kafka Metadata +For data quality checking purposes, kafka metadata sometimes needs to be added to the record. For more information refer to the [Depot-bigquery.md#metadata section](https://github.com/odpf/depot/blob/main/docs/sinks/bigquery.md#metadata) + +## Default columns for json data type +With a dynamic schema for json we need to create the table with some default columns, for example a partition key needs to be set during creation of the table. Sample config: `SINK_BIGQUERY_DEFAULT_COLUMNS=event_timestamp=timestamp`. For more information refer to the [Depot-bigquery.md#default-columns-for-json-data-type section](https://github.com/odpf/depot/blob/main/docs/sinks/bigquery.md#default-columns-for-json-data-type) + +## Error handling +The response can contain multiple errors, which will be sent to firehose from depot.
Please refer to the [Depot-bigquery.md#errors-handling section](https://github.com/odpf/depot/blob/main/docs/sinks/bigquery.md#errors-handling) + + +## Google Cloud Bigquery IAM Permission +Several IAM permissions are required for the bigquery sink to run properly. For more detail refer to the [Depot-bigquery.md#google-cloud-bigquery-iam-permission section](https://github.com/odpf/depot/blob/main/docs/sinks/bigquery.md#google-cloud-bigquery-iam-permission) + -[Configuration of Bigquery Sink] (https://github.com/odpf/depot/blob/main/docs/reference/configuration/bigquery-sink.md) \ No newline at end of file From fe6c222e39c38e39acc83371651ec6aa77b4dae1 Mon Sep 17 00:00:00 2001 From: "kevin.bheda" Date: Fri, 5 Aug 2022 12:08:20 +0530 Subject: [PATCH 8/9] chore: version bump for json and protobuf bigquery sink from depot --- build.gradle | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/build.gradle b/build.gradle index a1f88e226..3fddaa1ed 100644 --- a/build.gradle +++ b/build.gradle @@ -33,7 +33,7 @@ lombok { } group 'io.odpf' -version '0.3.3' +version '0.3.4' def projName = "firehose" @@ -101,7 +101,7 @@ dependencies { implementation 'com.google.cloud:google-cloud-storage:1.114.0' implementation 'com.google.cloud:google-cloud-bigquery:1.115.0' implementation 'org.apache.logging.log4j:log4j-core:2.17.1' - implementation group: 'io.odpf', name: 'depot', version: '0.1.6' + implementation group: 'io.odpf', name: 'depot', version: '0.1.7' implementation group: 'com.networknt', name: 'json-schema-validator', version: '1.0.59' exclude group: 'org.slf4j' testImplementation group: 'junit', name: 'junit', version: '4.11' From c6f2a46dc9160beaf038122e3e4510d089fd1e21 Mon Sep 17 00:00:00 2001 From: "kevin.bheda" Date: Fri, 5 Aug 2022 13:50:06 +0530 Subject: [PATCH 9/9] chore: version bump depot introduction and json support for bigquery --- build.gradle | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/build.gradle b/build.gradle index 3fddaa1ed..81859a6bb 100644 --- a/build.gradle +++ b/build.gradle @@ -33,7 +33,7 @@ lombok { } group 'io.odpf' -version '0.3.4' +version '0.4.0' def projName = "firehose"
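Relating back to the At Least Once Guarantee section added to the BigQuery sink documentation in PATCH 7/9 above, a minimal, self-contained sketch of the "commit only the latest contiguous offset" idea is shown below. This is illustrative only and is not the OffsetManager implementation used by Firehose; the class and method names are made up for the example.

```java
import java.util.TreeSet;

// Illustrative sketch: track processed offsets for one partition and expose the
// highest offset that is safe to commit, i.e. the end of the contiguous run of
// processed offsets starting right after the last committed one.
class ContiguousOffsetTracker {
    private final TreeSet<Long> processed = new TreeSet<>();
    private long committed;

    ContiguousOffsetTracker(long lastCommittedOffset) {
        this.committed = lastCommittedOffset;
    }

    void markProcessed(long offset) {
        processed.add(offset);
    }

    // Advances the committed offset across the contiguous run of processed offsets and returns it.
    long committableOffset() {
        while (processed.contains(committed + 1)) {
            committed++;
            processed.remove(committed);
        }
        return committed;
    }
}
```

With a last committed offset of 4 and offsets 5, 6 and 8 marked processed, `committableOffset()` returns 6; offset 8 is held back until 7 completes, which is why out-of-order completions in asynchronous mode do not cause premature commits.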