Hello Kay, 

I am doing a fresh installation at the moment. Hopefully everything will work fine after that.

Elasticsearch and Graylog are installed on the same machine. There is no other machine involved, so I'm wondering why Elasticsearch is using this IP address. The IP is used by another system which is not connected to Graylog. That's very confusing.

Within elasticsearch.conf I only changed cluster.name=graylog2.

I have added graylog2.conf as an appendix.

Kind Regards
Lucy

# If you are running more than one instance of graylog2-server you have to select one of these
# instances as master. The master will perform some periodical tasks that non-masters won't perform.
is_master = true

# The auto-generated node ID will be stored in this file and read after restarts. It is a good idea
# to use an absolute file path here if you are starting graylog2-server from init scripts or similar.
node_id_file = /etc/graylog2-server-node-id

# You MUST set a secret to secure/pepper the stored user passwords here. Use at least 64 characters.
# Generate one by using for example: pwgen -s 96
password_secret = GTrwvad5BrZF9BFVbvtPparrreN2egmL2C8mCbf5s6Asx6czsweExvVMAzEEGQfy
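# (If pwgen is not available, any sufficiently long random string should also do; for example,
# openssl rand -base64 96 prints a random value well over the required 64 characters.)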
# the default root user is named 'admin'
# root_username = admin
# You MUST specify a hash password for the root user (which you only need to initially set up the
# system and in case you lose connectivity to your authentication backend)
# Create one by using for example: echo -n yourpassword | shasum -a 256
# and put the resulting hash value into the following line
root_password_sha2 = cbca62a8734a1c7afe39ac994d0a6936723790da11abe63470978178457efbe9
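# (Note that shasum prints the hex digest followed by "  -", a placeholder for stdin;
# only the digest itself belongs in root_password_sha2 above.)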

# Set plugin directory here (relative or absolute)
plugin_dir = plugin

# REST API listen URI. Must be reachable by other graylog2-server nodes if you run a cluster.
rest_listen_uri = http://127.0.0.1:12900/
# REST API transport address. Defaults to first non-loopback IPv4 system address and port 12900.
# This will be promoted in the cluster discovery APIs and other nodes may try to connect on this
# address. (see rest_listen_uri)
#rest_transport_uri = http://192.168.1.1:12900/

# Embedded elasticsearch configuration file
# pay attention to the working directory of the server, maybe use an absolute path here
#elasticsearch_config_file = /etc/graylog2-elasticsearch.yml

elasticsearch_max_docs_per_index = 10000000

# How many indices do you want to keep?
# elasticsearch_max_number_of_indices*elasticsearch_max_docs_per_index=total number of messages in your setup
elasticsearch_max_number_of_indices = 20
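# (With the values in this file that is 20 * 10000000 = 200,000,000 messages in total
# before the retention strategy below is applied.)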

# Decide what happens with the oldest indices when the maximum number of indices is reached.
# The following strategies are available:
#   - delete # Deletes the index completely (Default)
#   - close # Closes the index and hides it from the system. Can be re-opened later.
retention_strategy = delete

# How many ElasticSearch shards and replicas should be used per index? Note that this only applies to newly created indices.
elasticsearch_shards = 2
elasticsearch_replicas = 0

elasticsearch_index_prefix = graylog2

# Do you want to allow searches with leading wildcards? This can be extremely resource hungry and should only
# be enabled with care. See also: http://support.torch.sh/help/kb/graylog2-web-interface/the-search-bar-explained
allow_leading_wildcard_searches = false

# settings to be passed to elasticsearch's client (overriding those in the provided elasticsearch_config_file)
# all these
# this must be the same as for your elasticsearch cluster
elasticsearch_cluster_name = graylog2

# you could also leave this out, but makes it easier to identify the graylog2 client instance
#elasticsearch_node_name = graylog2-server

# we don't want the graylog2 server to store any data, or be master node
#elasticsearch_node_master = false
#elasticsearch_node_data = false

# use a different port if you run multiple elasticsearch nodes on one machine
#elasticsearch_transport_tcp_port = 9350
# we don't need to run the embedded HTTP server here
#elasticsearch_http_enabled = false

#elasticsearch_discovery_zen_ping_multicast_enabled = false
#elasticsearch_discovery_zen_ping_unicast_hosts = 192.168.1.203:9300


# Analyzer (tokenizer) to use for message and full_message field. The "standard" filter usually is a good idea.
# All supported analyzers are: standard, simple, whitespace, stop, keyword, pattern, language, snowball, custom
# ElasticSearch documentation: http://www.elasticsearch.org/guide/reference/index-modules/analysis/
# Note that this setting only takes effect on newly created indices.
elasticsearch_analyzer = standard

# Batch size for all outputs. This is the maximum (!) number of messages an output module will get at once.
# For example, if this is set to 5000 (default), the ElasticSearch Output will not index more than 5000 messages
# at once. After that index operation is performed, the next batch will be indexed. If there is only 1 message
# waiting, it will only index that single message. It is important to raise this parameter if you send in so
# many messages that it is not enough to index 5000 messages at once. (Only at *really* high message rates)
output_batch_size = 15000

# The number of parallel running processors.
# Raise this number if your buffers are filling up.
processbuffer_processors = 15
outputbuffer_processors = 15

# Wait strategy describing how buffer processors wait on a cursor sequence. (default: sleeping)
# Possible types:
#  - yielding
#     Compromise between performance and CPU usage.
#  - sleeping
#     Compromise between performance and CPU usage. Latency spikes can occur after quiet periods.
#  - blocking
#     High throughput, low latency, higher CPU usage.
#  - busy_spinning
#     Avoids syscalls which could introduce latency jitter. Best when threads can be bound to specific CPU cores.
processor_wait_strategy = blocking

# Size of internal ring buffers. Raise this if raising outputbuffer_processors does not help anymore.
# For optimum performance your LogMessage objects in the ring buffer should fit in your CPU L3 cache.
# Start server with --statistics flag to see buffer utilization.
# Must be a power of 2. (512, 1024, 2048, ...)
ring_size = 1024

# MongoDB Configuration
mongodb_useauth = false
#mongodb_user = grayloguser
#mongodb_password = *********
mongodb_host = 127.0.0.1
#mongodb_replica_set = localhost:27017,localhost:27018,localhost:27019
mongodb_database = graylog2
mongodb_port = 27017

# Raise this according to the maximum connections your MongoDB server can handle if you encounter MongoDB connection problems.
mongodb_max_connections = 100

# Number of threads allowed to be blocked by MongoDB connections multiplier. Default: 5
# If mongodb_max_connections is 100, and mongodb_threads_allowed_to_block_multiplier is 5, then 500 threads can block. More than that and an exception will be thrown.
# http://api.mongodb.org/java/current/com/mongodb/MongoOptions.html#threadsAllowedToBlockForConnectionMultiplier
mongodb_threads_allowed_to_block_multiplier = 5

# Drools Rule File (Use to rewrite incoming log messages)
# See: http://support.torch.sh/help/kb/graylog2-server/custom-message-rewritingprocessing
# rules_file = /etc/graylog2.drl

# Email transport
transport_email_enabled = false
transport_email_hostname = mail.example.com
transport_email_port = 587
transport_email_use_auth = true
transport_email_use_tls = true
transport_email_use_ssl = true
transport_email_auth_username = [email protected]
transport_email_auth_password = secret
transport_email_subject_prefix = [graylog2]
transport_email_from_email = [email protected]
