Hi,

I am seeing the below issue while starting InfluxDB version 
influxdb-1.0.0-1.x86_64 on 
CentOS Linux release 7.2.1511 (Core)

cat /etc/influxdb/influxdb.conf
### Welcome to the InfluxDB configuration file.

# Once every 24 hours InfluxDB will report usage data to usage.influxdata.com
# The data includes a random ID, os, arch, version, the number of series and 
other
# usage data. No data from user databases is ever transmitted.
# Change this option to true to disable reporting.
reporting-disabled = false

# we'll try to get the hostname automatically, but if it the os returns 
something
# that isn't resolvable by other servers in the cluster, use this option to
# manually set the hostname
# hostname = "localhost"

###
### [meta]
###
### Controls the parameters for the Raft consensus group that stores metadata
### about the InfluxDB cluster.
###

[meta]
  # Where the metadata/raft database is stored
  dir = "/var/lib/influxdb/meta"

  retention-autocreate = true

  # If log messages are printed for the meta service
  logging-enabled = true
  pprof-enabled = false

  # The default duration for leases.
  lease-duration = "1m0s"

###
### [data]
###
### Controls where the actual shard data for InfluxDB lives and how it is
### flushed from the WAL. "dir" may need to be changed to a suitable place
### for your system, but the WAL settings are an advanced configuration. The
### defaults should work for most systems.
###

[data]
  # Controls if this node holds time series data shards in the cluster
  enabled = true

  dir = "/var/lib/influxdb/data"

  # These are the WAL settings for the storage engine >= 0.9.3
  wal-dir = "/var/lib/influxdb/wal"
  wal-logging-enabled = true

  # Trace logging provides more verbose output around the tsm engine. Turning
  # this on can provide more useful output for debugging tsm engine issues.
  # trace-logging-enabled = false

  # Whether queries should be logged before execution. Very useful for 
troubleshooting, but will
  # log any sensitive data contained within a query.
  # query-log-enabled = true

  # Settings for the TSM engine

  # CacheMaxMemorySize is the maximum size a shard's cache can
  # reach before it starts rejecting writes.
  # cache-max-memory-size = 524288000

  # CacheSnapshotMemorySize is the size at which the engine will
  # snapshot the cache and write it to a TSM file, freeing up memory
  # cache-snapshot-memory-size = 26214400

  # CacheSnapshotWriteColdDuration is the length of time at
  # which the engine will snapshot the cache and write it to
  # a new TSM file if the shard hasn't received writes or deletes
  # cache-snapshot-write-cold-duration = "1h"

  # MinCompactionFileCount is the minimum number of TSM files
  # that need to exist before a compaction cycle will run
  # compact-min-file-count = 3

  # CompactFullWriteColdDuration is the duration at which the engine
  # will compact all TSM files in a shard if it hasn't received a
  # write or delete
  # compact-full-write-cold-duration = "24h"

  # MaxPointsPerBlock is the maximum number of points in an encoded
  # block in a TSM file. Larger numbers may yield better compression
  # but could incur a performance penalty when querying
  # max-points-per-block = 1000

###
### [coordinator]
###
### Controls the clustering service configuration.
###

[coordinator]
  write-timeout = "10s"
  max-concurrent-queries = 0
  query-timeout = "0"
  log-queries-after = "0"
  max-select-point = 0
  max-select-series = 0
  max-select-buckets = 0

###
### [retention]
###
### Controls the enforcement of retention policies for evicting old data.
###

[retention]
  enabled = true
  check-interval = "30m"

###
### [shard-precreation]
###
### Controls the precreation of shards, so they are available before data 
arrives.
### Only shards that, after creation, will have both a start- and end-time in 
the
### future, will ever be created. Shards are never precreated that would be 
wholly
### or partially in the past.

[shard-precreation]
  enabled = true
  check-interval = "10m"
  advance-period = "30m"

###
### Controls the system self-monitoring, statistics and diagnostics.
###
### The internal database for monitoring data is created automatically if
### if it does not already exist. The target retention within this database
### is called 'monitor' and is also created with a retention period of 7 days
### and a replication factor of 1, if it does not exist. In all cases the
### this retention policy is configured as the default for the database.

[monitor]
  store-enabled = true # Whether to record statistics internally.
  store-database = "_internal" # The destination database for recorded 
statistics
  store-interval = "10s" # The interval at which to record statistics

###
### [admin]
###
### Controls the availability of the built-in, web-based admin interface. If 
HTTPS is
### enabled for the admin interface, HTTPS must also be enabled on the [http] 
service.
###

[admin]
  enabled = true
  bind-address = ":8083"
  https-enabled = false
  https-certificate = "/etc/ssl/influxdb.pem"

###
### [http]
###
### Controls how the HTTP endpoints are configured. These are the primary
### mechanism for getting data into and out of InfluxDB.
###

[http]
  enabled = true
  bind-address = ":8086"
  auth-enabled = false
  log-enabled = true
  write-tracing = false
  pprof-enabled = false
  https-enabled = false
  https-certificate = "/etc/ssl/influxdb.pem"
  ### Use a separate private key location.
  # https-private-key = ""
  max-row-limit = 10000
  realm = "InfluxDB"

###
### [subsciber]
###
### Controls the subscriptions, which can be used to fork a copy of all data
### received by the InfluxDB host.
###

[subsciber]
  enabled = true
  http-timeout = "30s"


###
### [[graphite]]
###
### Controls one or many listeners for Graphite data.
###

[[graphite]]
  enabled = false
  # database = "graphite"
  # bind-address = ":2003"
  # protocol = "tcp"
  # consistency-level = "one"

  # These next lines control how batching works. You should have this enabled
  # otherwise you could get dropped metrics or poor performance. Batching
  # will buffer points in memory if you have many coming in.

  # batch-size = 5000 # will flush if this many points get buffered
  # batch-pending = 10 # number of batches that may be pending in memory
  # batch-timeout = "1s" # will flush at least this often even if we haven't 
hit buffer limit
  # udp-read-buffer = 0 # UDP Read buffer size, 0 means OS default. UDP 
listener will fail if set above OS max.

  ### This string joins multiple matching 'measurement' values providing more 
control over the final measurement name.
  # separator = "."

  ### Default tags that will be added to all metrics.  These can be overridden 
at the template level
  ### or by tags extracted from metric
  # tags = ["region=us-east", "zone=1c"]

  ### Each template line requires a template pattern.  It can have an optional
  ### filter before the template and separated by spaces.  It can also have 
optional extra
  ### tags following the template.  Multiple tags should be separated by commas 
and no spaces
  ### similar to the line protocol format.  There can be only one default 
template.
  # templates = [
  #   "*.app env.service.resource.measurement",
  #   # Default template
  #   "server.*",
  # ]

###
### [collectd]
###
### Controls one or many listeners for collectd data.
###

[[collectd]]
  enabled = "true"
  bind-address = ":25826"
  database = "collectd_db"
  typesdb = "/opt/collectd/share/collectd/types.db"

  # These next lines control how batching works. You should have this enabled
  # otherwise you could get dropped metrics or poor performance. Batching
  # will buffer points in memory if you have many coming in.

  # batch-size = 1000 # will flush if this many points get buffered
  # batch-pending = 5 # number of batches that may be pending in memory
  # batch-timeout = "1s" # will flush at least this often even if we haven't 
hit buffer limit
  # read-buffer = 0 # UDP Read buffer size, 0 means OS default. UDP listener 
will fail if set above OS max.

###
### [opentsdb]
###
### Controls one or many listeners for OpenTSDB data.
###

[[opentsdb]]
  enabled = false
  # bind-address = ":4242"
  # database = "opentsdb"
  # retention-policy = ""
  # consistency-level = "one"
  # tls-enabled = false
  # certificate= ""
  # log-point-errors = true # Log an error for every malformed point.

  # These next lines control how batching works. You should have this enabled
  # otherwise you could get dropped metrics or poor performance. Only points
  # metrics received over the telnet protocol undergo batching.

  # batch-size = 1000 # will flush if this many points get buffered
  # batch-pending = 5 # number of batches that may be pending in memory
  # batch-timeout = "1s" # will flush at least this often even if we haven't 
hit buffer limit

###
### [[udp]]
###
### Controls the listeners for InfluxDB line protocol data via UDP.
###

[[udp]]
  enabled = false
  # bind-address = ""
  # database = "udp"
  # retention-policy = ""

  # These next lines control how batching works. You should have this enabled
  # otherwise you could get dropped metrics or poor performance. Batching
  # will buffer points in memory if you have many coming in.

  # batch-size = 1000 # will flush if this many points get buffered
  # batch-pending = 5 # number of batches that may be pending in memory
  # batch-timeout = "1s" # will flush at least this often even if we haven't 
hit buffer limit
  # read-buffer = 0 # UDP Read buffer size, 0 means OS default. UDP listener 
will fail if set above OS max.

  # set the expected UDP payload size; lower values tend to yield better 
performance, default is max UDP size 65536
  # udp-payload-size = 65536

###
### [continuous_queries]
###
### Controls how continuous queries are run within InfluxDB.
###

[continuous_queries]
  log-enabled = true
  enabled = true
  # run-interval = "1s" # interval for how often continuous queries will be 
checked if they need to run


Sep 16 14:06:44 test.testchat.app.in systemd[1]: Started InfluxDB is an 
open-source, distributed, time series database.
Sep 16 14:06:44 test.testchat.app.in systemd[1]: Starting InfluxDB is an 
open-source, distributed, time series database...
Sep 16 14:06:44 test.testchat.app.in influxd[3956]: 8888888           .d888 888 
                  8888888b.  888888b.
Sep 16 14:06:44 test.testchat.app.in influxd[3956]: 888            d88P"  888   
                888  "Y88b 888  "88b
Sep 16 14:06:44 test.testchat.app.in influxd[3956]: 888            888    888   
                888    888 888  .88P
Sep 16 14:06:44 test.testchat.app.in influxd[3956]: 888   88888b.  888888 888 
888  888 888  888 888    888 8888888K.
Sep 16 14:06:44 test.testchat.app.in influxd[3956]: 888   888 "88b 888    888 
888  888  Y8bd8P' 888    888 888  "Y88b
Sep 16 14:06:44 test.testchat.app.in influxd[3956]: 888   888  888 888    888 
888  888   X88K   888    888 888    888
Sep 16 14:06:44 test.testchat.app.in influxd[3956]: 888   888  888 888    888 
Y88b 888 .d8""8b. 888  .d88P 888   d88P
Sep 16 14:06:44 test.testchat.app.in influxd[3956]: 8888888 888  888 888    888 
 "Y88888 888  888 8888888P"  8888888P"
Sep 16 14:06:44 test.testchat.app.in influxd[3956]: [run] 2016/09/16 14:06:44 
InfluxDB starting, version 1.0.0, branch master, commit 37992377a55fbc13
Sep 16 14:06:44 test.testchat.app.in influxd[3956]: [run] 2016/09/16 14:06:44 
Go version go1.6.2, GOMAXPROCS set to 1
Sep 16 14:06:45 test.testchat.app.in influxd[3956]: [run] 2016/09/16 14:06:45 
Using configuration at: /etc/influxdb/influxdb.conf
Sep 16 14:06:45 test.testchat.app.in influxd[3956]: run: parse config: toml: 
cannot load TOML value of type string into a Go boolean
Sep 16 14:06:45 test.testchat.app.in systemd[1]: influxdb.service: main process 
exited, code=exited, status=1/FAILURE
Sep 16 14:06:45 test.testchat.app.in systemd[1]: Unit influxdb.service entered 
failed state.
Sep 16 14:06:45 test.testchat.app.in systemd[1]: influxdb.service failed.
Sep 16 14:06:45 test.testchat.app.in systemd[1]: influxdb.service holdoff time 
over, scheduling restart.
Sep 16 14:06:45 test.testchat.app.in systemd[1]: start request repeated too 
quickly for influxdb.service
Sep 16 14:06:45 test.testchat.app.in systemd[1]: Failed to start InfluxDB is an 
open-source, distributed, time series database.
Sep 16 14:06:45 test.testchat.app.in systemd[1]: Unit influxdb.service entered 
failed state.
Sep 16 14:06:45 test.testchat.app.in systemd[1]: influxdb.service failed.

Any help will be highly appreciable.

Regards,

Kaushal

-- 
Remember to include the InfluxDB version number with all issue reports
--- 
You received this message because you are subscribed to the Google Groups 
"InfluxDB" group.
To unsubscribe from this group and stop receiving emails from it, send an email 
to influxdb+unsubscr...@googlegroups.com.
To post to this group, send email to influxdb@googlegroups.com.
Visit this group at https://groups.google.com/group/influxdb.
To view this discussion on the web visit 
https://groups.google.com/d/msgid/influxdb/c16aeb9b-f904-47e7-84d6-a245e8a4cac2%40googlegroups.com.
For more options, visit https://groups.google.com/d/optout.

Reply via email to