Automatically generate docs for cassandra.yaml

Project: http://git-wip-us.apache.org/repos/asf/cassandra/repo
Commit: http://git-wip-us.apache.org/repos/asf/cassandra/commit/f2f30714
Tree: http://git-wip-us.apache.org/repos/asf/cassandra/tree/f2f30714
Diff: http://git-wip-us.apache.org/repos/asf/cassandra/diff/f2f30714

Branch: refs/heads/trunk
Commit: f2f30714436457dcb175b6365bf85d116f3763d9
Parents: 5d65542
Author: Tyler Hobbs <[email protected]>
Authored: Tue Jun 21 12:40:40 2016 -0500
Committer: Sylvain Lebresne <[email protected]>
Committed: Tue Jun 21 19:53:48 2016 +0200

----------------------------------------------------------------------
 conf/cassandra.yaml        | 222 +++++++++++++++++++++++++---------------
 doc/Makefile               |  27 +++++
 doc/convert_yaml_to_rst.py | 144 ++++++++++++++++++++++++++
 doc/source/index.rst       |   1 +
 4 files changed, 314 insertions(+), 80 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/cassandra/blob/f2f30714/conf/cassandra.yaml
----------------------------------------------------------------------
diff --git a/conf/cassandra.yaml b/conf/cassandra.yaml
index dcd5278..c43820e 100644
--- a/conf/cassandra.yaml
+++ b/conf/cassandra.yaml
@@ -35,20 +35,22 @@ num_tokens: 256
 # Only supported with the Murmur3Partitioner.
 # allocate_tokens_for_keyspace: KEYSPACE
 
-# initial_token allows you to specify tokens manually.  While you can use # it with
+# initial_token allows you to specify tokens manually.  While you can use it with
 # vnodes (num_tokens > 1, above) -- in which case you should provide a 
-# comma-separated list -- it's primarily used when adding nodes # to legacy clusters 
+# comma-separated list -- it's primarily used when adding nodes to legacy clusters 
 # that do not have vnodes enabled.
 # initial_token:
 
 # See http://wiki.apache.org/cassandra/HintedHandoff
 # May either be "true" or "false" to enable globally
 hinted_handoff_enabled: true
+
 # When hinted_handoff_enabled is true, a black list of data centers that will not
 # perform hinted handoff
-#hinted_handoff_disabled_datacenters:
+# hinted_handoff_disabled_datacenters:
 #    - DC1
 #    - DC2
+
 # this defines the maximum amount of time a dead host will have hints
 # generated.  After it has been dead this long, new hints for it will not be
 # created until it has been seen alive and gone down again.
@@ -193,26 +195,44 @@ partitioner: org.apache.cassandra.dht.Murmur3Partitioner
 # If not set, the default directory is $CASSANDRA_HOME/data/commitlog.
 # commitlog_directory: /var/lib/cassandra/commitlog
 
-# policy for data disk failures:
-# die: shut down gossip and client transports and kill the JVM for any fs errors or
-#      single-sstable errors, so the node can be replaced.
-# stop_paranoid: shut down gossip and client transports even for single-sstable errors,
-#                kill the JVM for errors during startup.
-# stop: shut down gossip and client transports, leaving the node effectively dead, but
-#       can still be inspected via JMX, kill the JVM for errors during startup.
-# best_effort: stop using the failed disk and respond to requests based on
-#              remaining available sstables.  This means you WILL see obsolete
-#              data at CL.ONE!
-# ignore: ignore fatal errors and let requests fail, as in pre-1.2 Cassandra
+# Policy for data disk failures:
+#
+# die
+#   shut down gossip and client transports and kill the JVM for any fs errors or
+#   single-sstable errors, so the node can be replaced.
+#
+# stop_paranoid
+#   shut down gossip and client transports even for single-sstable errors,
+#   kill the JVM for errors during startup.
+#
+# stop
+#   shut down gossip and client transports, leaving the node effectively dead, but
+#   can still be inspected via JMX, kill the JVM for errors during startup.
+#
+# best_effort
+#    stop using the failed disk and respond to requests based on
+#    remaining available sstables.  This means you WILL see obsolete
+#    data at CL.ONE!
+#
+# ignore
+#    ignore fatal errors and let requests fail, as in pre-1.2 Cassandra
 disk_failure_policy: stop
 
-# policy for commit disk failures:
-# die: shut down gossip and Thrift and kill the JVM, so the node can be replaced.
-# stop: shut down gossip and Thrift, leaving the node effectively dead, but
-#       can still be inspected via JMX.
-# stop_commit: shutdown the commit log, letting writes collect but
-#              continuing to service reads, as in pre-2.0.5 Cassandra
-# ignore: ignore fatal errors and let the batches fail
+# Policy for commit disk failures:
+#
+# die
+#   shut down gossip and Thrift and kill the JVM, so the node can be replaced.
+#
+# stop
+#   shut down gossip and Thrift, leaving the node effectively dead, but
+#   can still be inspected via JMX.
+#
+# stop_commit
+#   shutdown the commit log, letting writes collect but
+#   continuing to service reads, as in pre-2.0.5 Cassandra
+#
+# ignore
+#   ignore fatal errors and let the batches fail
 commit_failure_policy: stop
 
 # Maximum size of the native protocol prepared statement cache
@@ -272,11 +292,14 @@ key_cache_save_period: 14400
 # Disabled by default, meaning all keys are going to be saved
 # key_cache_keys_to_save: 100
 
-# Row cache implementation class name.
-# Available implementations:
-#   org.apache.cassandra.cache.OHCProvider                Fully off-heap row cache implementation (default).
-#   org.apache.cassandra.cache.SerializingCacheProvider   This is the row cache implementation availabile
-#                                                         in previous releases of Cassandra.
+# Row cache implementation class name. Available implementations:
+#
+# org.apache.cassandra.cache.OHCProvider
+#   Fully off-heap row cache implementation (default).
+#
+# org.apache.cassandra.cache.SerializingCacheProvider
+#   This is the row cache implementation availabile
+#   in previous releases of Cassandra.
 # row_cache_class_name: org.apache.cassandra.cache.OHCProvider
 
 # Maximum size of the row cache in memory.
@@ -371,7 +394,7 @@ commitlog_segment_size_in_mb: 32
 # Compression to apply to the commit log. If omitted, the commit log
 # will be written uncompressed.  LZ4, Snappy, and Deflate compressors
 # are supported.
-#commitlog_compression:
+# commitlog_compression:
 #   - class_name: LZ4Compressor
 #     parameters:
 #         -
@@ -448,9 +471,15 @@ concurrent_materialized_view_writes: 32
 
 # Specify the way Cassandra allocates and manages memtable memory.
 # Options are:
-#   heap_buffers:    on heap nio buffers
-#   offheap_buffers: off heap (direct) nio buffers
-#   offheap_objects: off heap objects
+#
+# heap_buffers
+#   on heap nio buffers
+#
+# offheap_buffers
+#   off heap (direct) nio buffers
+#
+# offheap_objects
+#    off heap objects
 memtable_allocation_type: heap_buffers
 
 # Total space to use for commit logs on disk.
@@ -508,8 +537,7 @@ ssl_storage_port: 7001
 # Address or interface to bind to and tell other Cassandra nodes to connect to.
 # You _must_ change this if you want multiple nodes to be able to communicate!
 #
-# Set listen_address OR listen_interface, not both. Interfaces must correspond
-# to a single address, IP aliasing is not supported.
+# Set listen_address OR listen_interface, not both.
 #
 # Leaving it blank leaves it up to InetAddress.getLocalHost(). This
 # will always do the Right Thing _if_ the node is properly configured
@@ -518,12 +546,16 @@ ssl_storage_port: 7001
 #
 # Setting listen_address to 0.0.0.0 is always wrong.
 #
+listen_address: localhost
+
+# Set listen_address OR listen_interface, not both. Interfaces must correspond
+# to a single address, IP aliasing is not supported.
+# listen_interface: eth0
+
 # If you choose to specify the interface by name and the interface has an ipv4 and an ipv6 address
 # you can specify which should be chosen using listen_interface_prefer_ipv6. If false the first ipv4
 # address will be used. If true the first ipv6 address will be used. Defaults to false preferring
 # ipv4. If there is only one address it will be selected regardless of ipv4/ipv6.
-listen_address: localhost
-# listen_interface: eth0
 # listen_interface_prefer_ipv6: false
 
 # Address to broadcast to other Cassandra nodes
@@ -582,8 +614,7 @@ start_rpc: false
 # The address or interface to bind the Thrift RPC service and native transport
 # server to.
 #
-# Set rpc_address OR rpc_interface, not both. Interfaces must correspond
-# to a single address, IP aliasing is not supported.
+# Set rpc_address OR rpc_interface, not both.
 #
 # Leaving rpc_address blank has the same effect as on listen_address
 # (i.e. it will be based on the configured hostname of the node).
@@ -592,13 +623,16 @@ start_rpc: false
 # set broadcast_rpc_address to a value other than 0.0.0.0.
 #
 # For security reasons, you should not expose this port to the internet.  Firewall it if needed.
-#
+rpc_address: localhost
+
+# Set rpc_address OR rpc_interface, not both. Interfaces must correspond
+# to a single address, IP aliasing is not supported.
+# rpc_interface: eth1
+
 # If you choose to specify the interface by name and the interface has an ipv4 and an ipv6 address
 # you can specify which should be chosen using rpc_interface_prefer_ipv6. If false the first ipv4
 # address will be used. If true the first ipv6 address will be used. Defaults to false preferring
 # ipv4. If there is only one address it will be selected regardless of ipv4/ipv6.
-rpc_address: localhost
-# rpc_interface: eth1
 # rpc_interface_prefer_ipv6: false
 
 # port for Thrift to listen for clients on
@@ -615,16 +649,18 @@ rpc_keepalive: true
 
 # Cassandra provides two out-of-the-box options for the RPC Server:
 #
-# sync  -> One thread per thrift connection. For a very large number of clients, memory
-#          will be your limiting factor. On a 64 bit JVM, 180KB is the minimum stack size
-#          per thread, and that will correspond to your use of virtual memory (but physical memory
-#          may be limited depending on use of stack space).
+# sync
+#   One thread per thrift connection. For a very large number of clients, memory
+#   will be your limiting factor. On a 64 bit JVM, 180KB is the minimum stack size
+#   per thread, and that will correspond to your use of virtual memory (but physical memory
+#   may be limited depending on use of stack space).
 #
-# hsha  -> Stands for "half synchronous, half asynchronous." All thrift clients are handled
-#          asynchronously using a small number of threads that does not vary with the amount
-#          of thrift clients (and thus scales well to many clients). The rpc requests are still
-#          synchronous (one thread per active request). If hsha is selected then it is essential
-#          that rpc_max_threads is changed from the default value of unlimited.
+# hsha
+#   Stands for "half synchronous, half asynchronous." All thrift clients are handled
+#   asynchronously using a small number of threads that does not vary with the amount
+#   of thrift clients (and thus scales well to many clients). The rpc requests are still
+#   synchronous (one thread per active request). If hsha is selected then it is essential
+#   that rpc_max_threads is changed from the default value of unlimited.
 #
 # The default is sync because on Windows hsha is about 30% slower.  On Linux,
 # sync/hsha performance is about the same, with hsha of course using less memory.
@@ -653,13 +689,17 @@ rpc_server_type: sync
 # Uncomment to set socket buffer size for internode communication
 # Note that when setting this, the buffer size is limited by net.core.wmem_max
 # and when not setting it it is defined by net.ipv4.tcp_wmem
-# See:
+# See also:
 # /proc/sys/net/core/wmem_max
 # /proc/sys/net/core/rmem_max
 # /proc/sys/net/ipv4/tcp_wmem
 # /proc/sys/net/ipv4/tcp_wmem
-# and: man tcp
+# and 'man tcp'
 # internode_send_buff_size_in_bytes:
+
+# Uncomment to set socket buffer size for internode communication
+# Note that when setting this, the buffer size is limited by net.core.wmem_max
+# and when not setting it it is defined by net.ipv4.tcp_wmem
 # internode_recv_buff_size_in_bytes:
 
 # Frame size for thrift (maximum message length).
@@ -686,13 +726,15 @@ auto_snapshot: true
 # Granularity of the collation index of rows within a partition.
 # Increase if your rows are large, or if you have a very large
 # number of rows per partition.  The competing goals are these:
-#   1) a smaller granularity means more index entries are generated
-#      and looking up rows withing the partition by collation column
-#      is faster
-#   2) but, Cassandra will keep the collation index in memory for hot
-#      rows (as part of the key cache), so a larger granularity means
-#      you can cache more hot rows
+#
+# - a smaller granularity means more index entries are generated
+#   and looking up rows withing the partition by collation column
+#   is faster
+# - but, Cassandra will keep the collation index in memory for hot
+#   rows (as part of the key cache), so a larger granularity means
+#   you can cache more hot rows
 column_index_size_in_kb: 64
+
 # Per sstable indexed key cache entries (the collation index in memory
 # mentioned above) exceeding this size will not be held on heap.
 # This means that only partition information is held on heap and the
@@ -788,6 +830,7 @@ cross_node_timeout: false
 
 # endpoint_snitch -- Set this to a class that implements
 # IEndpointSnitch.  The snitch has two functions:
+#
 # - it teaches Cassandra enough about your network topology to route
 #   requests efficiently
 # - it allows Cassandra to spread replicas around your cluster to avoid
@@ -803,34 +846,40 @@ cross_node_timeout: false
 # IF THE RACK A REPLICA IS PLACED IN CHANGES AFTER THE REPLICA HAS BEEN
 # ADDED TO A RING, THE NODE MUST BE DECOMMISSIONED AND REBOOTSTRAPPED.
 #
-# Out of the box, Cassandra provides
-#  - SimpleSnitch:
+# Out of the box, Cassandra provides:
+#
+# SimpleSnitch:
 #    Treats Strategy order as proximity. This can improve cache
 #    locality when disabling read repair.  Only appropriate for
 #    single-datacenter deployments.
-#  - GossipingPropertyFileSnitch
+#
+# GossipingPropertyFileSnitch
 #    This should be your go-to snitch for production use.  The rack
 #    and datacenter for the local node are defined in
 #    cassandra-rackdc.properties and propagated to other nodes via
 #    gossip.  If cassandra-topology.properties exists, it is used as a
 #    fallback, allowing migration from the PropertyFileSnitch.
-#  - PropertyFileSnitch:
+#
+# PropertyFileSnitch:
 #    Proximity is determined by rack and data center, which are
 #    explicitly configured in cassandra-topology.properties.
-#  - Ec2Snitch:
+#
+# Ec2Snitch:
 #    Appropriate for EC2 deployments in a single Region. Loads Region
 #    and Availability Zone information from the EC2 API. The Region is
 #    treated as the datacenter, and the Availability Zone as the rack.
 #    Only private IPs are used, so this will not work across multiple
 #    Regions.
-#  - Ec2MultiRegionSnitch:
+#
+# Ec2MultiRegionSnitch:
 #    Uses public IPs as broadcast_address to allow cross-region
 #    connectivity.  (Thus, you should set seed addresses to the public
 #    IP as well.) You will need to open the storage_port or
 #    ssl_storage_port on the public IP firewall.  (For intra-Region
 #    traffic, Cassandra will switch to the private IP after
 #    establishing a connection.)
-#  - RackInferringSnitch:
+#
+# RackInferringSnitch:
 #    Proximity is determined by rack and data center, which are
 #    assumed to correspond to the 3rd and 2nd octet of each node's IP
 #    address, respectively.  Unless this happens to match your
@@ -870,20 +919,26 @@ dynamic_snitch_badness_threshold: 0.1
 request_scheduler: org.apache.cassandra.scheduler.NoScheduler
 
 # Scheduler Options vary based on the type of scheduler
-# NoScheduler - Has no options
+#
+# NoScheduler
+#   Has no options
+#
 # RoundRobin
-#  - throttle_limit -- The throttle_limit is the number of in-flight
-#                      requests per client.  Requests beyond 
-#                      that limit are queued up until
-#                      running requests can complete.
-#                      The value of 80 here is twice the number of
-#                      concurrent_reads + concurrent_writes.
-#  - default_weight -- default_weight is optional and allows for
-#                      overriding the default which is 1.
-#  - weights -- Weights are optional and will default to 1 or the
-#               overridden default_weight. The weight translates into how
-#               many requests are handled during each turn of the
-#               RoundRobin, based on the scheduler id.
+#   throttle_limit
+#     The throttle_limit is the number of in-flight
+#     requests per client.  Requests beyond 
+#     that limit are queued up until
+#     running requests can complete.
+#     The value of 80 here is twice the number of
+#     concurrent_reads + concurrent_writes.
+#   default_weight
+#     default_weight is optional and allows for
+#     overriding the default which is 1.
+#   weights
+#     Weights are optional and will default to 1 or the
+#     overridden default_weight. The weight translates into how
+#     many requests are handled during each turn of the
+#     RoundRobin, based on the scheduler id.
 #
 # request_scheduler_options:
 #    throttle_limit: 80
@@ -905,7 +960,7 @@ request_scheduler: org.apache.cassandra.scheduler.NoScheduler
 # FIPS compliant settings can be configured at JVM level and should not
 # involve changing encryption settings here:
 # https://docs.oracle.com/javase/8/docs/technotes/guides/security/jsse/FIPS.html
-# NOTE: No custom encryption options are enabled at the moment
+# *NOTE* No custom encryption options are enabled at the moment
 # The available internode options are : all, none, dc, rack
 #
 # If set to dc cassandra will encrypt the traffic between the DCs
@@ -948,9 +1003,16 @@ client_encryption_options:
 
 # internode_compression controls whether traffic between nodes is
 # compressed.
-# can be:  all  - all traffic is compressed
-#          dc   - traffic between different datacenters is compressed
-#          none - nothing is compressed.
+# Can be:
+#
+# all
+#   all traffic is compressed
+#
+# dc
+#   traffic between different datacenters is compressed
+#
+# none
+#   nothing is compressed.
 internode_compression: dc
 
 # Enable or disable tcp_nodelay for inter-dc communication.

http://git-wip-us.apache.org/repos/asf/cassandra/blob/f2f30714/doc/Makefile
----------------------------------------------------------------------
diff --git a/doc/Makefile b/doc/Makefile
index d6f0cd7..778448a 100644
--- a/doc/Makefile
+++ b/doc/Makefile
@@ -14,6 +14,8 @@ ALLSPHINXOPTS   = -d $(BUILDDIR)/doctrees $(PAPEROPT_$(PAPER)) $(SPHINXOPTS) sou
 # the i18n builder cannot share the environment and doctrees with the others
 I18NSPHINXOPTS  = $(PAPEROPT_$(PAPER)) $(SPHINXOPTS) source
 
+MAKE_CASSANDRA_YAML = python convert_yaml_to_rst.py ../conf/cassandra.yaml source/cassandra_config_file.rst
+
 .PHONY: help
 help:
        @echo "Please use \`make <target>' where <target> is one of"
@@ -50,36 +52,42 @@ clean:
 
 .PHONY: html
 html:
+       $(MAKE_CASSANDRA_YAML)
        $(SPHINXBUILD) -b html $(ALLSPHINXOPTS) $(BUILDDIR)/html
        @echo
        @echo "Build finished. The HTML pages are in $(BUILDDIR)/html."
 
 .PHONY: dirhtml
 dirhtml:
+       $(MAKE_CASSANDRA_YAML)
        $(SPHINXBUILD) -b dirhtml $(ALLSPHINXOPTS) $(BUILDDIR)/dirhtml
        @echo
        @echo "Build finished. The HTML pages are in $(BUILDDIR)/dirhtml."
 
 .PHONY: singlehtml
 singlehtml:
+       $(MAKE_CASSANDRA_YAML)
        $(SPHINXBUILD) -b singlehtml $(ALLSPHINXOPTS) $(BUILDDIR)/singlehtml
        @echo
        @echo "Build finished. The HTML page is in $(BUILDDIR)/singlehtml."
 
 .PHONY: pickle
 pickle:
+       $(MAKE_CASSANDRA_YAML)
        $(SPHINXBUILD) -b pickle $(ALLSPHINXOPTS) $(BUILDDIR)/pickle
        @echo
        @echo "Build finished; now you can process the pickle files."
 
 .PHONY: json
 json:
+       $(MAKE_CASSANDRA_YAML)
        $(SPHINXBUILD) -b json $(ALLSPHINXOPTS) $(BUILDDIR)/json
        @echo
        @echo "Build finished; now you can process the JSON files."
 
 .PHONY: htmlhelp
 htmlhelp:
+       $(MAKE_CASSANDRA_YAML)
        $(SPHINXBUILD) -b htmlhelp $(ALLSPHINXOPTS) $(BUILDDIR)/htmlhelp
        @echo
        @echo "Build finished; now you can run HTML Help Workshop with the" \
@@ -87,6 +95,7 @@ htmlhelp:
 
 .PHONY: qthelp
 qthelp:
+       $(MAKE_CASSANDRA_YAML)
        $(SPHINXBUILD) -b qthelp $(ALLSPHINXOPTS) $(BUILDDIR)/qthelp
        @echo
        @echo "Build finished; now you can run "qcollectiongenerator" with the" 
\
@@ -97,6 +106,7 @@ qthelp:
 
 .PHONY: applehelp
 applehelp:
+       $(MAKE_CASSANDRA_YAML)
        $(SPHINXBUILD) -b applehelp $(ALLSPHINXOPTS) $(BUILDDIR)/applehelp
        @echo
        @echo "Build finished. The help book is in $(BUILDDIR)/applehelp."
@@ -106,6 +116,7 @@ applehelp:
 
 .PHONY: devhelp
 devhelp:
+       $(MAKE_CASSANDRA_YAML)
        $(SPHINXBUILD) -b devhelp $(ALLSPHINXOPTS) $(BUILDDIR)/devhelp
        @echo
        @echo "Build finished."
@@ -116,18 +127,21 @@ devhelp:
 
 .PHONY: epub
 epub:
+       $(MAKE_CASSANDRA_YAML)
        $(SPHINXBUILD) -b epub $(ALLSPHINXOPTS) $(BUILDDIR)/epub
        @echo
        @echo "Build finished. The epub file is in $(BUILDDIR)/epub."
 
 .PHONY: epub3
 epub3:
+       $(MAKE_CASSANDRA_YAML)
        $(SPHINXBUILD) -b epub3 $(ALLSPHINXOPTS) $(BUILDDIR)/epub3
        @echo
        @echo "Build finished. The epub3 file is in $(BUILDDIR)/epub3."
 
 .PHONY: latex
 latex:
+       $(MAKE_CASSANDRA_YAML)
        $(SPHINXBUILD) -b latex $(ALLSPHINXOPTS) $(BUILDDIR)/latex
        @echo
        @echo "Build finished; the LaTeX files are in $(BUILDDIR)/latex."
@@ -136,6 +150,7 @@ latex:
 
 .PHONY: latexpdf
 latexpdf:
+       $(MAKE_CASSANDRA_YAML)
        $(SPHINXBUILD) -b latex $(ALLSPHINXOPTS) $(BUILDDIR)/latex
        @echo "Running LaTeX files through pdflatex..."
        $(MAKE) -C $(BUILDDIR)/latex all-pdf
@@ -143,6 +158,7 @@ latexpdf:
 
 .PHONY: latexpdfja
 latexpdfja:
+       $(MAKE_CASSANDRA_YAML)
        $(SPHINXBUILD) -b latex $(ALLSPHINXOPTS) $(BUILDDIR)/latex
        @echo "Running LaTeX files through platex and dvipdfmx..."
        $(MAKE) -C $(BUILDDIR)/latex all-pdf-ja
@@ -156,12 +172,14 @@ text:
 
 .PHONY: man
 man:
+       $(MAKE_CASSANDRA_YAML)
        $(SPHINXBUILD) -b man $(ALLSPHINXOPTS) $(BUILDDIR)/man
        @echo
        @echo "Build finished. The manual pages are in $(BUILDDIR)/man."
 
 .PHONY: texinfo
 texinfo:
+       $(MAKE_CASSANDRA_YAML)
        $(SPHINXBUILD) -b texinfo $(ALLSPHINXOPTS) $(BUILDDIR)/texinfo
        @echo
        @echo "Build finished. The Texinfo files are in $(BUILDDIR)/texinfo."
@@ -170,6 +188,7 @@ texinfo:
 
 .PHONY: info
 info:
+       $(MAKE_CASSANDRA_YAML)
        $(SPHINXBUILD) -b texinfo $(ALLSPHINXOPTS) $(BUILDDIR)/texinfo
        @echo "Running Texinfo files through makeinfo..."
        make -C $(BUILDDIR)/texinfo info
@@ -177,18 +196,21 @@ info:
 
 .PHONY: gettext
 gettext:
+       $(MAKE_CASSANDRA_YAML)
        $(SPHINXBUILD) -b gettext $(I18NSPHINXOPTS) $(BUILDDIR)/locale
        @echo
        @echo "Build finished. The message catalogs are in $(BUILDDIR)/locale."
 
 .PHONY: changes
 changes:
+       $(MAKE_CASSANDRA_YAML)
        $(SPHINXBUILD) -b changes $(ALLSPHINXOPTS) $(BUILDDIR)/changes
        @echo
        @echo "The overview file is in $(BUILDDIR)/changes."
 
 .PHONY: linkcheck
 linkcheck:
+       $(MAKE_CASSANDRA_YAML)
        $(SPHINXBUILD) -b linkcheck $(ALLSPHINXOPTS) $(BUILDDIR)/linkcheck
        @echo
        @echo "Link check complete; look for any errors in the above output " \
@@ -196,30 +218,35 @@ linkcheck:
 
 .PHONY: doctest
 doctest:
+       $(MAKE_CASSANDRA_YAML)
        $(SPHINXBUILD) -b doctest $(ALLSPHINXOPTS) $(BUILDDIR)/doctest
        @echo "Testing of doctests in the sources finished, look at the " \
              "results in $(BUILDDIR)/doctest/output.txt."
 
 .PHONY: coverage
 coverage:
+       $(MAKE_CASSANDRA_YAML)
        $(SPHINXBUILD) -b coverage $(ALLSPHINXOPTS) $(BUILDDIR)/coverage
        @echo "Testing of coverage in the sources finished, look at the " \
              "results in $(BUILDDIR)/coverage/python.txt."
 
 .PHONY: xml
 xml:
+       $(MAKE_CASSANDRA_YAML)
        $(SPHINXBUILD) -b xml $(ALLSPHINXOPTS) $(BUILDDIR)/xml
        @echo
        @echo "Build finished. The XML files are in $(BUILDDIR)/xml."
 
 .PHONY: pseudoxml
 pseudoxml:
+       $(MAKE_CASSANDRA_YAML)
        $(SPHINXBUILD) -b pseudoxml $(ALLSPHINXOPTS) $(BUILDDIR)/pseudoxml
        @echo
        @echo "Build finished. The pseudo-XML files are in 
$(BUILDDIR)/pseudoxml."
 
 .PHONY: dummy
 dummy:
+       $(MAKE_CASSANDRA_YAML)
        $(SPHINXBUILD) -b dummy $(ALLSPHINXOPTS) $(BUILDDIR)/dummy
        @echo
        @echo "Build finished. Dummy builder generates no files."

http://git-wip-us.apache.org/repos/asf/cassandra/blob/f2f30714/doc/convert_yaml_to_rst.py
----------------------------------------------------------------------
diff --git a/doc/convert_yaml_to_rst.py b/doc/convert_yaml_to_rst.py
new file mode 100644
index 0000000..426286a
--- /dev/null
+++ b/doc/convert_yaml_to_rst.py
@@ -0,0 +1,144 @@
+# Licensed to the Apache Software Foundation (ASF) under one
+# or more contributor license agreements.  See the NOTICE file
+# distributed with this work for additional information
+# regarding copyright ownership.  The ASF licenses this file
+# to you under the Apache License, Version 2.0 (the
+# "License"); you may not use this file except in compliance
+# with the License.  You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+"""
+A script to convert cassandra.yaml into ReStructuredText for
+the online documentation.
+
+Usage:
+
+    convert_yaml_to_rest.py conf/cassandra.yaml docs/source/conf.rst
+"""
+
+import sys
+import re
+
+# Detects options, whether commented or uncommented.
+# Group 1 will be non-empty if the option is commented out.
+# Group 2 will contain the option name.
+# Group 3 will contain the default value, if one exists.
+option_re = re.compile(r"^(# ?)?([a-z0-9_]+): ?([^/].*)")
+
+# Detects normal comment lines.
+commented_re = re.compile(r"^# ?(.*)")
+
+# A set of option names that have complex values (i.e. lists or dicts).
+# This list is hardcoded because there did not seem to be another
+# good way to reliably detect this case, especially considering
+# that these can be commented out (making it useless to use a yaml parser).
+COMPLEX_OPTIONS = (
+    'seed_provider',
+    'request_scheduler_options',
+    'data_file_directories',
+    'commitlog_compression',
+    'hints_compression',
+    'server_encryption_options',
+    'client_encryption_options',
+    'transparent_data_encryption_options',
+    'hinted_handoff_disabled_datacenters'
+)
+
+
+def convert(yaml_file, dest_file):
+    with open(yaml_file, 'r') as f:
+        # Trim off the boilerplate header
+        lines = f.readlines()[7:]
+
+    with open(dest_file, 'w') as outfile:
+        outfile.write("Cassandra Config File\n")
+        outfile.write("=====================\n")
+
+        # since comments preceed an option, this holds all of the comment
+        # lines we've seen since the last option
+        comments_since_last_option = []
+        line_iter = iter(lines)
+        while True:
+            try:
+                line = next(line_iter)
+            except StopIteration:
+                break
+
+            match = option_re.match(line)
+            if match:
+                option_name = match.group(2)
+                is_commented = bool(match.group(1))
+
+                is_complex = option_name in COMPLEX_OPTIONS
+                complex_option = read_complex_option(line_iter) if is_complex else None
+
+                write_section_header(option_name, outfile)
+                write_comments(comments_since_last_option, is_commented, outfile)
+                if is_complex:
+                    write_complex_option(complex_option, outfile)
+                else:
+                    maybe_write_default_value(match, outfile)
+                comments_since_last_option = []
+            else:
+                comment_match = commented_re.match(line)
+                if comment_match:
+                    comments_since_last_option.append(comment_match.group(1))
+                elif line == "\n":
+                    comments_since_last_option.append('')
+
+
+def write_section_header(option_name, outfile):
+    outfile.write("\n")
+    outfile.write("``%s``\n" % (option_name,))
+    outfile.write("-" * (len(option_name) + 4) + "\n")
+
+
+def write_comments(comment_lines, is_commented, outfile):
+    if is_commented:
+        outfile.write("*This option is commented out by default.*\n")
+
+    for comment in comment_lines:
+        if "SAFETY THRESHOLDS" not in comment_lines:
+            outfile.write(comment + "\n")
+
+
+def maybe_write_default_value(option_match, outfile):
+    default_value = option_match.group(3)
+    if default_value and default_value != "\n":
+        outfile.write("\n*Default Value:* %s\n" % (default_value,))
+
+
+def read_complex_option(line_iter):
+    option_lines = []
+    try:
+        while True:
+            line = next(line_iter)
+            if line == '\n':
+                return option_lines
+            else:
+                option_lines.append(line)
+    except StopIteration:
+        return option_lines
+
+
+def write_complex_option(lines, outfile):
+    outfile.write("\n*Default Value (complex option)*::\n\n")
+    for line in lines:
+        outfile.write((" " * 4) + line)
+
+
+if __name__ == '__main__':
+    if len(sys.argv) != 3:
+        print >> sys.stderr, "Usage: %s <yaml source file> <rst dest file>" % (sys.argv[0],)
+        sys.exit(1)
+
+    yaml_file = sys.argv[1]
+    dest_file = sys.argv[2]
+    convert(yaml_file, dest_file)

http://git-wip-us.apache.org/repos/asf/cassandra/blob/f2f30714/doc/source/index.rst
----------------------------------------------------------------------
diff --git a/doc/source/index.rst b/doc/source/index.rst
index a68d464..16f1323 100644
--- a/doc/source/index.rst
+++ b/doc/source/index.rst
@@ -31,6 +31,7 @@ Contents:
    cql
    cqlsh
    operations
+   cassandra_config_file
    troubleshooting
    faq
    contactus
