plainolneesh commented on a change in pull request #338:
URL: https://github.com/apache/fluo-muchos/pull/338#discussion_r443816003



##########
File path: ansible/roles/logstash/templates/pipeline.conf
##########
@@ -0,0 +1,57 @@
+input {
+    beats { 
+        port => 5044
+    }
+    file {
+        path => "/media/ephemeral0/logs/accumulo/*.log"
+    }
+}
+filter {
+  if [fileset][module] == "logstash" {   
+      if [fileset][name] == "hadoop" {
+    grok {
+
+        #2020-03-30 15:01:17,801 INFO org.apache.hadoop.hdfs.StateChange: DIR* completeFile: /accumulo/tables/+r/root_tablet/F000003o.rf_tmp is closed by DFSClient_NONMAPREDUCE_69506301_11
+        match => { "message" => [
+            "%{TIMESTAMP_ISO8601}%{SPACE}%{WORD:log_level}%{GREEDYDATA:hostname}%{GREEDYDATA:LINK}:%{GREEDYDATA:state}%{WORD:module}: %{GREEDYDATA:message}",
+            "%{TIMESTAMP_ISO8601}%{SPACE}%{WORD:log_level}%{GREEDYDATA:ip}%{GREEDYDATA:LINK}:%{GREEDYDATA:state}%{WORD:module}: %{GREEDYDATA:message}"
+        ] }
+    }
+    mutate {
+        add_field => [ "received_at", "%{@timestamp}" ]
+        add_field => [ "received_from", "%{host}" ]
+    }
+
+    } else if [fileset][name] == "accumulo" {
+      grok {
+        #2020-03-30 14:01:06,322 [gc.GarbageCollectWriteAheadLogs] DEBUG: New tablet servers noticed: [worker3:9997[1000005c5390005], worker1:9997[1000005c5390004], worker2:9997[1000005c5390006]]
+        match => { "message" => "%{TIMESTAMP_ISO8601:TIMESTAMP} \[%{GREEDYDATA:module}\.%{GREEDYDATA:class_name}\] %{WORD:level}: %{GREEDYDATA:message}\[%{GREEDYDATA:hostname}:%{GREEDYDATA:hostname}\:%{GREEDYDATA:hostname}\:%{GREEDYDATA:hostname}\]"}

Review comment:
       This has been moved to logstash-simple-2.conf. But if you do see that my naming conventions are off there, please let me know.
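
       For reference, here is a rough sketch of how that Accumulo match could
       be written with one capture per field. This is a minimal example; the
       field names "log_level" and "log_message" below are placeholders, not
       necessarily what logstash-simple-2.conf uses:

           filter {
             grok {
               # 2020-03-30 14:01:06,322 [gc.GarbageCollectWriteAheadLogs] DEBUG: New tablet servers noticed: [...]
               match => { "message" => "%{TIMESTAMP_ISO8601:timestamp} \[%{DATA:module}\.%{DATA:class_name}\] %{WORD:log_level}: %{GREEDYDATA:log_message}" }
             }
           }

       Using %{DATA} rather than %{GREEDYDATA} for the module and class
       captures keeps the matching anchored on the literal "." and "]"
       delimiters, so each field gets a distinct, predictable name.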
   




----------------------------------------------------------------
This is an automated message from the Apache Git Service.
To respond to the message, please log on to GitHub and use the
URL above to go to the specific comment.

For queries about this service, please contact Infrastructure at:
[email protected]

