Browse Source

Configuration for centralized log monitoring.

Lukas Cerny 6 years ago
parent
commit
3c57539a77
6 changed files with 280 additions and 25 deletions
  1. 0 15
      Dockerfile
  2. 22 7
      connector-common/src/main/resources/log4j2.xml
  3. 4 3
      docker-compose.yaml
  4. 21 0
      docker/Dockerfile
  5. 215 0
      docker/filebeat.yml
  6. 18 0
      docker/start.sh

+ 0 - 15
Dockerfile

@@ -1,15 +0,0 @@
-FROM zenika/alpine-maven:3-jdk8
-
-ARG MAVEN_PROFILE
-
-COPY . /app
-WORKDIR /app
-
-RUN mvn clean
-RUN mvn package -P $MAVEN_PROFILE
-
-ENTRYPOINT if [ "$DEBUG" = "true" ] ; then \
-        java -cp "bin/*" -agentlib:jdwp=transport=dt_socket,server=y,suspend=n,address=0.0.0.0:5005 cz.senslog.connector.app.Main $APP_PARAMS \
-    ; else \
-        java -cp "bin/*" cz.senslog.connector.app.Main $APP_PARAMS \
-    ; fi

+ 22 - 7
connector-common/src/main/resources/log4j2.xml

@@ -1,20 +1,35 @@
 <?xml version="1.0" encoding="UTF-8"?>
 <?xml version="1.0" encoding="UTF-8"?>
 <Configuration xmlns="http://logging.apache.org/log4j/2.0/config" status="INFO">
 <Configuration xmlns="http://logging.apache.org/log4j/2.0/config" status="INFO">
+
+    <Properties>
+        <Property name="logPath">./</Property>
+    </Properties>
+
     <Appenders>
     <Appenders>
-        <!-- Console Appender -->
-        <Console name="STDOUT" target="SYSTEM_OUT">
+
+        <Console name="console" target="SYSTEM_OUT">
             <PatternLayout pattern="%-5p | %d{yyyy-MM-dd HH:mm:ss} | [%t] %C{2} (%F:%L) - %m%n" />
             <PatternLayout pattern="%-5p | %d{yyyy-MM-dd HH:mm:ss} | [%t] %C{2} (%F:%L) - %m%n" />
         </Console>
         </Console>
 
 
-        <File name="FileLog" fileName="logs/all.log" immediateFlush="false" append="false">
-            <PatternLayout pattern="%-5p | %d{yyyy-MM-dd HH:mm:ss} | [%t] %C{2} (%F:%L) - %m%n" />
-        </File>
+        <RollingFile name="filebeat"
+                     fileName="${sys:logPath:-.}/app.log"
+                     filePattern="${sys:logPath:-.}/app.%i.log.gz"
+        >
+            <PatternLayout alwaysWriteExceptions="false"
+                    pattern='{"app.date":"%d{ISO8601}","app.thread":"%t","app.level":"%level","app.logger":"%logger:%L", "app.exception":"%enc{%ex}{JSON}", "app.message":"%msg"}%n'
+            />
+            <Policies>
+<!--                <TimeBasedTriggeringPolicy interval="1"/> &lt;!&ndash; Number of days for a log file &ndash;&gt;-->
+                <SizeBasedTriggeringPolicy size="10MB" />
+            </Policies>
+        </RollingFile>
+
     </Appenders>
     </Appenders>
     <Loggers>
     <Loggers>
         <Logger name="cz.senslog" level="info" />
         <Logger name="cz.senslog" level="info" />
         <Root level="info">
         <Root level="info">
-            <AppenderRef ref="STDOUT" />
-            <AppenderRef ref="FileLog" />
+            <AppenderRef ref="console" />
+            <AppenderRef ref="filebeat" />
         </Root>
         </Root>
     </Loggers>
     </Loggers>
 </Configuration>
 </Configuration>

+ 4 - 3
docker-compose.yaml

@@ -5,7 +5,7 @@ services:
   lws1:
   lws1:
     container_name: loraWanSenslog1
     container_name: loraWanSenslog1
     build:
     build:
-      dockerfile: Dockerfile
+      dockerfile: docker/Dockerfile
       context: .
       context: .
       args:
       args:
         MAVEN_PROFILE: LoraWanSenslog1
         MAVEN_PROFILE: LoraWanSenslog1
@@ -16,7 +16,7 @@ services:
   fcs2:
   fcs2:
     container_name: fieldclimateSenslog2
     container_name: fieldclimateSenslog2
     build:
     build:
-      dockerfile: Dockerfile
+      dockerfile: docker/Dockerfile
       context: .
       context: .
       args:
       args:
          MAVEN_PROFILE: FieldClimateSenslog2
          MAVEN_PROFILE: FieldClimateSenslog2
@@ -25,4 +25,5 @@ services:
     restart: always
     restart: always
     environment:
     environment:
         APP_PARAMS: -cf config/test.yaml
         APP_PARAMS: -cf config/test.yaml
-        DEBUG: "true"
+        DEBUG: "false"
+        LOG_MONITOR: "false"

+ 21 - 0
docker/Dockerfile

@@ -0,0 +1,21 @@
+FROM zenika/alpine-maven:3-jdk8
+
+ARG MAVEN_PROFILE
+ENV BUILD_PROFILE=$MAVEN_PROFILE
+
+COPY docker/filebeat.yml /etc/conf.d/
+COPY docker/start.sh /app/
+
+#COPY ["./connector-*", "/app/"]
+COPY . /app/
+
+WORKDIR /app
+
+RUN echo "http://dl-cdn.alpinelinux.org/alpine/edge/testing" >> /etc/apk/repositories
+RUN apk update
+RUN apk add filebeat
+
+RUN mvn clean
+RUN mvn package -P $MAVEN_PROFILE -DskipTests=true
+
+ENTRYPOINT ["/bin/sh", "start.sh"]

+ 215 - 0
docker/filebeat.yml

@@ -0,0 +1,215 @@
+###################### Filebeat Configuration Example #########################
+
+# This file is an example configuration file highlighting only the most common
+# options. The filebeat.reference.yml file from the same directory contains all the
+# supported options with more comments. You can use it as a reference.
+#
+# You can find the full configuration reference here:
+# https://www.elastic.co/guide/en/beats/filebeat/index.html
+
+# For more available modules and options, please see the filebeat.reference.yml sample
+# configuration file.
+
+#=========================== Filebeat inputs =============================
+
+filebeat.inputs:
+
+# Each - is an input. Most options can be set at the input level, so
+# you can use different inputs for various configurations.
+# Below are the input specific configurations.
+
+- type: log
+
+  # Change to true to enable this input configuration.
+  enabled: true
+
+  # Paths that should be crawled and fetched. Glob based paths.
+  paths:
+    - /var/log/connector-app/*.log
+    #- c:\programdata\elasticsearch\logs\*
+
+  # Exclude lines. A list of regular expressions to match. It drops the lines that are
+  # matching any regular expression from the list.
+  #exclude_lines: ['^DBG']
+
+  # Include lines. A list of regular expressions to match. It exports the lines that are
+  # matching any regular expression from the list.
+  #include_lines: ['^ERR', '^WARN']
+
+  # Exclude files. A list of regular expressions to match. Filebeat drops the files that
+  # are matching any regular expression from the list. By default, no files are dropped.
+  #exclude_files: ['.gz$']
+
+  # Optional additional fields. These fields can be freely picked
+  # to add additional information to the crawled log files for filtering
+  fields:
+     tags: ['json']
+  #  level: debug
+  #  review: 1
+
+  ### Multiline options
+
+  # Multiline can be used for log messages spanning multiple lines. This is common
+  # for Java Stack Traces or C-Line Continuation
+
+  # The regexp Pattern that has to be matched. The example pattern matches all lines starting with [
+  #multiline.pattern: ^\[
+
+  # Defines if the pattern set under pattern should be negated or not. Default is false.
+  #multiline.negate: false
+
+  # Match can be set to "after" or "before". It is used to define if lines should be append to a pattern
+  # that was (not) matched before or after or as long as a pattern is not matched based on negate.
+  # Note: After is the equivalent to previous and before is the equivalent to to next in Logstash
+  #multiline.match: after
+
+
+#============================= Filebeat modules ===============================
+
+filebeat.config.modules:
+  # Glob pattern for configuration loading
+  path: ${path.config}/modules.d/*.yml
+
+  # Set to true to enable config reloading
+  reload.enabled: false
+
+  # Period on which files under path should be checked for changes
+  #reload.period: 10s
+
+#==================== Elasticsearch template setting ==========================
+
+setup.template.settings:
+  index.number_of_shards: 1
+  #index.codec: best_compression
+  #_source.enabled: false
+
+#================================ General =====================================
+
+# The name of the shipper that publishes the network data. It can be used to group
+# all the transactions sent by a single shipper in the web interface.
+#name:
+
+# The tags of the shipper are included in their own field with each
+# transaction published.
+#tags: ["service-X", "web-tier"]
+
+# Optional fields that you can specify to add additional information to the
+# output.
+fields:
+  build.profile: ${BUILD_PROFILE}
+#  env: staging
+
+
+#============================== Dashboards =====================================
+# These settings control loading the sample dashboards to the Kibana index. Loading
+# the dashboards is disabled by default and can be enabled either by setting the
+# options here or by using the `setup` command.
+#setup.dashboards.enabled: false
+
+# The URL from where to download the dashboards archive. By default this URL
+# has a value which is computed based on the Beat name and version. For released
+# versions, this URL points to the dashboard archive on the artifacts.elastic.co
+# website.
+#setup.dashboards.url:
+
+#============================== Kibana =====================================
+
+# Starting with Beats version 6.0.0, the dashboards are loaded via the Kibana API.
+# This requires a Kibana endpoint configuration.
+setup.kibana:
+
+  # Kibana Host
+  # Scheme and port can be left out and will be set to the default (http and 5601)
+  # In case you specify and additional path, the scheme is required: http://localhost:5601/path
+  # IPv6 addresses should always be defined as: https://[2001:db8::1]:5601
+  #host: "localhost:5601"
+
+  # Kibana Space ID
+  # ID of the Kibana Space into which the dashboards should be loaded. By default,
+  # the Default Space will be used.
+  #space.id:
+
+#============================= Elastic Cloud ==================================
+
+# These settings simplify using Filebeat with the Elastic Cloud (https://cloud.elastic.co/).
+
+# The cloud.id setting overwrites the `output.elasticsearch.hosts` and
+# `setup.kibana.host` options.
+# You can find the `cloud.id` in the Elastic Cloud web UI.
+#cloud.id:
+
+# The cloud.auth setting overwrites the `output.elasticsearch.username` and
+# `output.elasticsearch.password` settings. The format is `<user>:<pass>`.
+#cloud.auth:
+
+#================================ Outputs =====================================
+
+# Configure what output to use when sending the data collected by the beat.
+
+#-------------------------- Elasticsearch output ------------------------------
+# output.elasticsearch:
+  # Array of hosts to connect to.
+  # hosts: ["localhost:9200"]
+
+  # Optional protocol and basic auth credentials.
+  #protocol: "https"
+  #username: "elastic"
+  #password: "changeme"
+
+#----------------------------- Logstash output --------------------------------
+output.logstash:
+  # The Logstash hosts
+  hosts: ["localhost:5044"]
+
+  # Optional SSL. By default is off.
+  # List of root certificates for HTTPS server verifications
+  #ssl.certificate_authorities: ["/etc/pki/root/ca.pem"]
+
+  # Certificate for SSL client authentication
+  #ssl.certificate: "/etc/pki/client/cert.pem"
+
+  # Client Certificate Key
+  #ssl.key: "/etc/pki/client/cert.key"
+
+#================================ Processors =====================================
+
+# Configure processors to enhance or manipulate events generated by the beat.
+
+processors:
+  - add_host_metadata: ~
+  - add_cloud_metadata: ~
+  - drop_fields:
+      fields: ["beat.name", "beat.hostname", "beat.version", "host.os.family", "host.os.version", "host.architecture", "host.containerized"]
+
+#================================ Logging =====================================
+
+# Sets log level. The default log level is info.
+# Available log levels are: error, warning, info, debug
+#logging.level: debug
+
+# At debug level, you can selectively enable logging only for some components.
+# To enable all selectors use ["*"]. Examples of other selectors are "beat",
+# "publish", "service".
+#logging.selectors: ["*"]
+
+#============================== Xpack Monitoring ===============================
+# filebeat can export internal metrics to a central Elasticsearch monitoring
+# cluster.  This requires xpack monitoring to be enabled in Elasticsearch.  The
+# reporting is disabled by default.
+
+# Set to true to enable the monitoring reporter.
+#monitoring.enabled: false
+
+# Uncomment to send the metrics to Elasticsearch. Most settings from the
+# Elasticsearch output are accepted here as well.
+# Note that the settings should point to your Elasticsearch *monitoring* cluster.
+# Any setting that is not set is automatically inherited from the Elasticsearch
+# output configuration, so if you have the Elasticsearch output configured such
+# that it is pointing to your Elasticsearch monitoring cluster, you can simply
+# uncomment the following line.
+#monitoring.elasticsearch:
+
+#================================= Migration ==================================
+
+# This allows to enable 6.7 migration aliases
+#migration.6_to_7.enabled: true

+ 18 - 0
docker/start.sh

@@ -0,0 +1,18 @@
+#!/bin/sh
+
+BUILD_FOLDER="bin"
+MAIN_CLASS="cz.senslog.connector.app.Main"
+LOG_PATH="/var/log/connector-app"
+DEBUG_PORT="5005"
+
+FILEBEAT_CONFIG_FILE="/etc/conf.d/filebeat.yml"
+
+if [ "$DEBUG" = "true" ]; then
+    DEBUG_PARAM="-agentlib:jdwp=transport=dt_socket,server=y,suspend=n,address=0.0.0.0:$DEBUG_PORT"
+fi
+
+if [ "$LOG_MONITOR" = "true" ]; then
+    filebeat -c "$FILEBEAT_CONFIG_FILE" &
+fi
+
+exec java -cp "$BUILD_FOLDER/*" -DlogPath="$LOG_PATH" $DEBUG_PARAM $MAIN_CLASS $APP_PARAMS