Assemble the apps using Docker Compose (closes #23)
oguzhanunlu committed Aug 1, 2018
1 parent 177b166 commit 72f5460
Showing 45 changed files with 797 additions and 1,606 deletions.
5 changes: 3 additions & 2 deletions Vagrantfile
@@ -10,6 +10,7 @@ Vagrant.configure("2") do |config|

config.vm.network "forwarded_port", guest: 80, host: 2000
config.vm.network "forwarded_port", guest: 3000, host: 3000
config.vm.network "forwarded_port", guest: 4171, host: 4171
config.vm.network "forwarded_port", guest: 8080, host: 8080
config.vm.network "forwarded_port", guest: 9200, host: 9200
config.vm.network "forwarded_port", guest: 5601, host: 5601
@@ -20,8 +21,8 @@ Vagrant.configure("2") do |config|
vb.name = Dir.pwd().split("/")[-1] + "-" + Time.now.to_f.to_i.to_s
vb.customize ["modifyvm", :id, "--natdnshostresolver1", "on"]
vb.customize [ "guestproperty", "set", :id, "--timesync-threshold", 10000 ]
vb.memory = 4096
vb.cpus = 1
vb.memory = 8192
vb.cpus = 2
end

config.vm.provision :shell do |sh|
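With the VM bumped to 8 GB of memory and 2 CPUs, and port 4171 now forwarded for nsqadmin, a quick host-side smoke test can confirm the forwarded services respond once the box is up. A minimal sketch, assuming the usual endpoints (nsqadmin's /ping, the collector's /i pixel used by the integration test below):

curl -s http://localhost:4171/ping                                         # nsqadmin, should answer OK
curl -s http://localhost:9200                                              # Elasticsearch cluster info
curl -s -o /dev/null -w "%{http_code}\n" http://localhost:5601/app/kibana  # Kibana UI status code
curl -s "http://localhost:8080/i?e=pv"                                     # Scala Stream Collector pixel endpoint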
11 changes: 1 addition & 10 deletions integration/integration_test.sh
@@ -1,22 +1,13 @@
#!/bin/bash

sudo service elasticsearch start
sudo service iglu_server_0.2.0 start
sudo service snowplow_stream_collector start
sudo service snowplow_stream_enrich start
sudo service snowplow_elasticsearch_loader_good start
sudo service snowplow_elasticsearch_loader_bad start
sudo service kibana4_init start
sleep 15

# Send good and bad events
COUNTER=0
while [ $COUNTER -lt 10 ]; do
curl http://localhost:8080/i?e=pv
curl http://localhost:8080/i
let COUNTER=COUNTER+1
done
sleep 60
sleep 90

# Assertions
good_count="$(curl --silent -XGET 'http://localhost:9200/good/good/_count' | python -c 'import json,sys;obj=json.load(sys.stdin);print obj["count"]')"
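The diff is truncated before the remaining assertions, but the good_count line shows the pattern. A sketch of how the bad-events check could mirror it; the index and type names come from the bad loader config later in this commit, and the expected count of 10 is an assumption based on the 10-iteration loop (each pass sends one good and one bad event):

bad_count="$(curl --silent -XGET 'http://localhost:9200/bad/bad/_count' | python -c 'import json,sys;obj=json.load(sys.stdin);print obj["count"]')"

# Compare against the 10 good and 10 bad events sent above
if [ "$good_count" -eq 10 ] && [ "$bad_count" -eq 10 ]; then
    echo "Integration test passed"
else
    echo "Integration test failed"
    exit 1
fi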
23 changes: 19 additions & 4 deletions provisioning/resources/configs/Caddyfile
@@ -4,12 +4,14 @@
/home
/kibana
/elasticsearch
/nsqadmin
/control-plane
/_plugin
}
redir /home /home/
redir /kibana /kibana/
redir /iglu-server /iglu-server/
redir /kibana /kibana/
redir /nsqadmin /nsqadmin/
redir /elasticsearch /elasticsearch/

proxy / localhost:8080

@@ -20,16 +22,29 @@
proxy /kibana localhost:5601 {
without /kibana
}
proxy /app/kibana localhost:5601
proxy /app/timelion localhost:5601
proxy /bundles localhost:5601
proxy /plugins localhost:5601
proxy /ui localhost:5601
proxy /api localhost:5601

proxy /iglu-server localhost:8081 {
without /iglu-server
}
proxy /api localhost:8081
proxy /api-docs localhost:8081

proxy /nsqadmin localhost:4171 {
without /nsqadmin
}
proxy /static localhost:4171
proxy /api/counter localhost:4171
proxy /api/nodes localhost:4171
proxy /api/topics localhost:4171

proxy /elasticsearch localhost:9200 {
without /elasticsearch
}
proxy /_plugin localhost:9200

proxy /control-plane localhost:10000 {
without /control-plane
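The new routes can be exercised from the host through Caddy, which the Vagrantfile forwards from guest port 80 to host port 2000. A rough sketch; USER:PASS stands in for whatever credentials protect the paths listed in the (truncated) block at the top of the file, while /iglu-server is not in that list and is queried without credentials:

curl -s -u USER:PASS http://localhost:2000/nsqadmin/ | head -n 5                       # nsqadmin UI via Caddy
curl -s -u USER:PASS "http://localhost:2000/elasticsearch/_cat/indices?v"              # Elasticsearch via Caddy
curl -s -u USER:PASS -o /dev/null -w "%{http_code}\n" http://localhost:2000/kibana/    # Kibana via Caddy
curl -s http://localhost:2000/iglu-server/api-docs | head -n 5                         # Iglu Server Swagger docs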
12 changes: 6 additions & 6 deletions provisioning/resources/configs/control-plane-api.toml
@@ -14,15 +14,15 @@ caddy = "Caddyfile"
iglu_resolver = "iglu-resolver.json"

[init_scripts]
stream_collector = "snowplow_stream_collector"
stream_enrich = "snowplow_stream_enrich"
es_loader_good = "snowplow_elasticsearch_loader_good"
es_loader_bad = "snowplow_elasticsearch_loader_bad"
iglu = "iglu_server_0.2.0"
stream_collector = "scala-stream-collector"
stream_enrich = "stream-enrich"
es_loader_good = "elasticsearch-loader-good"
es_loader_bad = "elasticsearch-loader-bad"
iglu = "iglu-server"
caddy = "caddy_init"

[PSQL]
user = "snowplow"
password = "snowplow"
database = "iglu"
adddress = "127.0.0.1:5432"
address = "127.0.0.1:5433"
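Besides correcting the adddress/address typo, the change points the control plane at Postgres on port 5433, presumably the host-mapped port of the Dockerised instance. A quick sanity check from the host with the credentials in the [PSQL] section, assuming the psql client is installed:

PGPASSWORD=snowplow psql -h 127.0.0.1 -p 5433 -U snowplow -d iglu -c '\conninfo'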
2 changes: 1 addition & 1 deletion provisioning/resources/configs/iglu-resolver.json
@@ -24,7 +24,7 @@
],
"connection": {
"http": {
"uri": "http://localhost:8081/api",
"uri": "http://iglu-server:8081/api",
"apikey": "PLACEHOLDER"
}
}
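The resolver now targets the iglu-server hostname, which only resolves on the Docker Compose network; from the host, the same registry is reachable through Caddy's /iglu-server route. An illustrative lookup, assuming Iglu Server's usual /api/schemas/{vendor}/{name}/{format}/{version} endpoint and reusing the PLACEHOLDER apikey from the resolver config (the schema coordinates here are only an example):

curl -s -H "apikey: PLACEHOLDER" \
  "http://localhost:2000/iglu-server/api/schemas/com.snowplowanalytics.snowplow/link_click/jsonschema/1-0-1"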
16 changes: 11 additions & 5 deletions provisioning/resources/configs/iglu-server.conf
@@ -1,4 +1,4 @@
# Copyright (c) 2014 Snowplow Analytics Ltd. All rights reserved.
# Copyright (c) 2014-2018 Snowplow Analytics Ltd. All rights reserved.
#
# This program is licensed to you under the Apache License Version 2.0, and
# you may not use this file except in compliance with the Apache License
@@ -15,15 +15,19 @@
# the Iglu repository server.

# 'repo-server' contains configuration options for the repo-server.
# interface on which the server will be running
# baseURL is address of deployment, "<ip>:<port>/<deployment_path>" address used for baseURL of Swagger UI
# port on which the server will be running
repo-server {
interface = "0.0.0.0"
baseURL = "0.0.0.0/iglu-server"
port = 8081
}

# 'postgres' contains configuration options for the postgre instance the server
# is using
postgres {
host = "localhost"
host = "postgres"
port = 5432
dbname = "iglu"
username = "snowplow"
@@ -32,14 +36,16 @@ postgres {
}

akka {
loggers = ["akka.event.slf4j.Slf4jLogger"]
loglevel = INFO
log-dead-letters = off
stdout-loglevel = "DEBUG"
logging-filter = "akka.event.slf4j.Slf4jLoggingFilter"
}

# spray-can is the HTTP server the Iglu repository server is built on.
spray.can {
akka.http {
server {
request-timeout = 10s
request-timeout = 10 seconds
remote-address-header = on
parsing.uri-parsing-mode = relaxed
}
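With the Postgres host switched from localhost to postgres, the Iglu server now relies on Docker's service discovery rather than a local database. The compose file itself is not among the files loaded in this excerpt, so the service name below is an assumption; listing the assembled services and tailing the server's startup logs is a reasonable first verification:

docker-compose ps                              # list the assembled services and their port mappings
docker-compose logs --tail=20 iglu-server      # assumed service name; should show a successful Postgres connection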
78 changes: 6 additions & 72 deletions provisioning/resources/configs/snowplow-es-loader-bad.hocon
@@ -14,93 +14,40 @@
# This file (config.hocon.sample) contains a template with
# configuration options for the Elasticsearch Loader.

# Sources currently supported are:
# "kinesis" for reading records from a Kinesis stream
# "stdin" for reading unencoded tab-separated events from stdin
# If set to "stdin", JSON documents will not be sent to Elasticsearch
# but will be written to stdout.
# "nsq" for reading unencoded tab-separated events from NSQ
source = nsq

# Where to write good and bad records
sink {
# Sinks currently supported are:
# "elasticsearch" for writing good records to Elasticsearch
# "stdout" for writing good records to stdout
good = elasticsearch

# Sinks currently supported are:
# "kinesis" for writing bad records to Kinesis
# "stderr" for writing bad records to stderr
# "nsq" for writing bad records to NSQ
# "none" for ignoring bad records
bad = none
}

# "good" for a stream of successfully enriched events
# "bad" for a stream of bad events
# "plain-json" for writing plain json
enabled = bad

# The following are used to authenticate for the Amazon Kinesis sink.
#
# If both are set to "default", the default provider chain is used
# (see http://docs.aws.amazon.com/AWSJavaSDK/latest/javadoc/com/amazonaws/auth/DefaultAWSCredentialsProviderChain.html)
#
# If both are set to "iam", use AWS IAM Roles to provision credentials.
#
# If both are set to "env", use environment variables AWS_ACCESS_KEY_ID and AWS_SECRET_ACCESS_KEY
aws {
accessKey: ""
secretKey: ""
}

# config for NSQ
nsq {
# Channel name for NSQ source
channelName = ESLoaderChannelBad

nsqdHost = "nsqd"
nsqdPort = 4150

# Host name for NSQ tools
host = "127.0.0.1"

# TCP port for nsqd
port = 4150

# HTTP port for nsqlookupd
lookupPort = 4161
nsqlookupdHost = "nsqlookupd"
nsqlookupdPort = 4161
}

kinesis {
# "LATEST": most recent data.
# "TRIM_HORIZON": oldest available data.
# "AT_TIMESTAMP": Start from the record at or after the specified timestamp
# Note: This only affects the first run of this application on a stream.
initialPosition= TRIM_HORIZON

# Maximum number of records to get from Kinesis per call to GetRecords
maxRecords = 1000

# Region where the Kinesis stream is located
region = ""

# "appName" is used for a DynamoDB table to maintain stream state.
# You can set it automatically using: "SnowplowElasticsearchSink-${sink.kinesis.in.stream-name}"
appName = ""
}

# Common configuration section for all stream sources
streams {
inStreamName = BadEnrichedEvents

# Stream for enriched events which are rejected by Elasticsearch
outStreamName = BadElasticsearchEvents

# Events are accumulated in a buffer before being sent to Elasticsearch.
# Note: Buffering is not supported by NSQ; will be ignored
# The buffer is emptied whenever:
# - the combined size of the stored records exceeds byteLimit or
# - the number of stored records exceeds recordLimit or
# - the time in milliseconds since it was last emptied exceeds timeLimit
buffer {
byteLimit = 5242880
recordLimit = 1
@@ -110,31 +57,18 @@ streams {

elasticsearch {

# Events are indexed using an Elasticsearch Client
# - endpoint: the cluster endpoint
# - port: the port the cluster can be accessed on
# - for http this is usually 9200
# - for transport this is usually 9300
# - max-timeout: the maximum attempt time before a client restart
# - ssl: if using the http client, whether to use ssl or not
client {
endpoint = "localhost"
endpoint = elasticsearch
port = 9200
maxTimeout = 10000
ssl = false
}

# When using the AWS ES service
# - signing: if using the http client and the AWS ES service you can sign your requests
# http://docs.aws.amazon.com/general/latest/gr/signing_aws_api_requests.html
# - region where the AWS ES service is located
aws {
signing = false
region = ""
}

# index: the Elasticsearch index name
# type: the Elasticsearch index type
cluster {
name = elasticsearch
index = bad
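With the bad loader now reading from NSQ via the nsqd/nsqlookupd hostnames and writing to the elasticsearch host, the plumbing can be spot-checked from the host through the forwarded ports; the nsqadmin API paths used here are the same ones proxied in the Caddyfile above:

curl -s http://localhost:4171/api/topics        # topics registered with the NSQ cluster, via nsqadmin
curl -s http://localhost:4171/api/nodes         # nsqd nodes known to nsqlookupd
curl -s 'http://localhost:9200/bad/bad/_count'  # documents the bad loader has indexed so far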