style95 closed pull request #3106: Couchdb persistency
URL: https://github.com/apache/incubator-openwhisk/pull/3106
This is a PR merged from a forked repository.
As GitHub hides the original diff of a merged foreign pull request
(one coming from a fork), the diff is reproduced below for the sake
of provenance:
diff --git a/ansible/couchdb.yml b/ansible/couchdb.yml
index b75cd099cd..3041dae2ca 100644
--- a/ansible/couchdb.yml
+++ b/ansible/couchdb.yml
@@ -14,5 +14,6 @@
when: not db.stat.exists
- hosts: db
+ serial: '1'
roles:
- - couchdb
\ No newline at end of file
+ - couchdb
diff --git a/ansible/environments/distributed/group_vars/all
b/ansible/environments/distributed/group_vars/all
index c36424b840..05acf34a4c 100755
--- a/ansible/environments/distributed/group_vars/all
+++ b/ansible/environments/distributed/group_vars/all
@@ -6,6 +6,7 @@ db_username: couch_user
db_password: couch_password
db_host: "{{ groups['db'] | first }}"
db_prefix: whisk_distributed_
+db_confdir: "{{ config_root_dir }}/couchdb"
whisk_version_name: distributed
config_root_dir: /tmp/wskconf
diff --git a/ansible/environments/docker-machine/group_vars/all
b/ansible/environments/docker-machine/group_vars/all
index 6f68e730f9..7328176079 100644
--- a/ansible/environments/docker-machine/group_vars/all
+++ b/ansible/environments/docker-machine/group_vars/all
@@ -22,6 +22,7 @@ db_password: "{{ lookup('ini', 'db_password section=db_creds
file={{ playbook_di
db_protocol: "{{ lookup('ini', 'db_protocol section=db_creds file={{
playbook_dir }}/db_local.ini') }}"
db_host: "{{ lookup('ini', 'db_host section=db_creds file={{ playbook_dir
}}/db_local.ini') }}"
db_port: "{{ lookup('ini', 'db_port section=db_creds file={{ playbook_dir
}}/db_local.ini') }}"
+db_confdir: "{{ config_root_dir }}/couchdb"
# API GW connection configuration
apigw_auth_user: ""
diff --git a/ansible/environments/local/group_vars/all
b/ansible/environments/local/group_vars/all
index b3700ff399..82852cf0c9 100755
--- a/ansible/environments/local/group_vars/all
+++ b/ansible/environments/local/group_vars/all
@@ -16,6 +16,7 @@ db_password: "{{ lookup('ini', 'db_password section=db_creds
file={{ playbook_di
db_protocol: "{{ lookup('ini', 'db_protocol section=db_creds file={{
playbook_dir }}/db_local.ini') }}"
db_host: "{{ lookup('ini', 'db_host section=db_creds file={{ playbook_dir
}}/db_local.ini') }}"
db_port: "{{ lookup('ini', 'db_port section=db_creds file={{ playbook_dir
}}/db_local.ini') }}"
+db_confdir: "{{ config_root_dir }}/couchdb"
# API GW connection configuration
apigw_auth_user: ""
diff --git a/ansible/group_vars/all b/ansible/group_vars/all
index cd4eeba8aa..6a140f38ee 100644
--- a/ansible/group_vars/all
+++ b/ansible/group_vars/all
@@ -195,6 +195,8 @@ nginx:
# The key db.whisk.auth is the name of the authentication database where all
keys of all users are stored.
# The db_prefix is defined for each environment on its own. The CouchDb
credentials are also defined for each environment on its own.
db:
+ dir:
+ become: "{{ db_dir_become | default(true) }}"
instances: "{{ groups['db'] | length }}"
authkeys:
- guest
diff --git a/ansible/roles/couchdb/files/default.d/README
b/ansible/roles/couchdb/files/default.d/README
new file mode 100644
index 0000000000..cae343ba4a
--- /dev/null
+++ b/ansible/roles/couchdb/files/default.d/README
@@ -0,0 +1,11 @@
+CouchDB default configuration files
+
+Files found under the etc/default.d directory that end with .ini are
+parsed within couchdb(1) at startup.
+
+This directory is intended for distribution-specific overrides of
+CouchDB defaults. Package maintainers should be placing overrides in
+this directory.
+
+System administrators should place overrides in the etc/local.d directory
+instead.
diff --git a/ansible/roles/couchdb/files/default.ini
b/ansible/roles/couchdb/files/default.ini
new file mode 100644
index 0000000000..ee5e4735b4
--- /dev/null
+++ b/ansible/roles/couchdb/files/default.ini
@@ -0,0 +1,560 @@
+; Upgrading CouchDB will overwrite this file.
+[vendor]
+name = The Apache Software Foundation
+
+[couchdb]
+uuid =
+database_dir = ./data
+view_index_dir = ./data
+; util_driver_dir =
+; plugin_dir =
+os_process_timeout = 5000 ; 5 seconds. for view and external servers.
+max_dbs_open = 500
+delayed_commits = false
+; Method used to compress everything that is appended to database and view
index files, except
+; for attachments (see the attachments section). Available methods are:
+;
+; none - no compression
+; snappy - use google snappy, a very fast compressor/decompressor
+; deflate_[N] - use zlib's deflate, N is the compression level which ranges
from 1 (fastest,
+; lowest compression ratio) to 9 (slowest, highest compression
ratio)
+file_compression = snappy
+; Higher values may give better read performance due to less read operations
+; and/or more OS page cache hits, but they can also increase overall response
+; time for writes when there are many attachment write requests in parallel.
+attachment_stream_buffer_size = 4096
+; Default security object for databases if not explicitly set
+; everyone - same as couchdb 1.0, everyone can read/write
+; admin_only - only admins can read/write
+; admin_local - sharded dbs on :5984 are read/write for everyone,
+; local dbs on :5986 are read/write for admins only
+default_security = admin_only
+; btree_chunk_size = 1279
+; maintenance_mode = false
+; stem_interactive_updates = true
+; update_lru_on_read = true
+; uri_file =
+; The speed of processing the _changes feed with doc_ids filter can be
+; influenced directly with this setting - increase for faster processing at the
+; expense of more memory usage.
+changes_doc_ids_optimization_threshold = 100
+; Maximum document ID length. Can be set to an integer or 'infinity'.
+;max_document_id_length = infinity
+;
+; Limit maximum document size. Requests to create / update documents with a
body
+; size larger than this will fail with a 413 http error. This limit applies to
+; requests which update a single document as well as individual documents from
+; a _bulk_docs request. Since there is no canonical size of json encoded data,
+; due to variability in what is escaped or how floats are encoded, this limit is
+; applied conservatively. For example 1.0e+16 could be encoded as 1e16, so 4
used
+; for size calculation instead of 7.
+;max_document_size = 4294967296 ; bytes
+
+; Maximum attachment size.
+; max_attachment_size = infinity
+
+[cluster]
+q=8
+n=3
+; placement = metro-dc-a:2,metro-dc-b:1
+
+[chttpd]
+; These settings affect the main, clustered port (5984 by default).
+port = 5984
+bind_address = 127.0.0.1
+backlog = 512
+docroot = ./share/www
+socket_options = [{recbuf, 262144}, {sndbuf, 262144}, {nodelay, true}]
+require_valid_user = false
+; List of headers that will be kept when the header Prefer: return=minimal is
included in a request.
+; If Server header is left out, Mochiweb will add its own one in.
+prefer_minimal = Cache-Control, Content-Length, Content-Range, Content-Type,
ETag, Server, Transfer-Encoding, Vary
+
+[database_compaction]
+; larger buffer sizes can originate smaller files
+doc_buffer_size = 524288 ; value in bytes
+checkpoint_after = 5242880 ; checkpoint after every N bytes were written
+
+[view_compaction]
+; larger buffer sizes can originate smaller files
+keyvalue_buffer_size = 2097152 ; value in bytes
+
+[couch_peruser]
+; If enabled, couch_peruser ensures that a private per-user database
+; exists for each document in _users. These databases are writable only
+; by the corresponding user. Databases are in the following form:
+; userdb-{hex encoded username}
+enable = false
+; If set to true and a user is deleted, the respective database gets
+; deleted as well.
+delete_dbs = false
+; Wait this many seconds after startup before attaching changes listeners
+; cluster_start_period = 5
+; Re-check cluster state at least every cluster_quiet_period seconds
+; cluster_quiet_period = 60
+
+[httpd]
+port = 5986
+bind_address = 127.0.0.1
+authentication_handlers = {couch_httpd_auth, cookie_authentication_handler},
{couch_httpd_auth, default_authentication_handler}
+default_handler = {couch_httpd_db, handle_request}
+secure_rewrites = true
+vhost_global_handlers = _utils, _uuids, _session, _users
+allow_jsonp = false
+; Options for the MochiWeb HTTP server.
+;server_options = [{backlog, 128}, {acceptor_pool_size, 16}]
+; For more socket options, consult Erlang's module 'inet' man page.
+;socket_options = [{recbuf, 262144}, {sndbuf, 262144}, {nodelay, true}]
+socket_options = [{recbuf, 262144}, {sndbuf, 262144}]
+enable_cors = false
+enable_xframe_options = false
+; CouchDB can optionally enforce a maximum uri length;
+; max_uri_length = 8000
+; changes_timeout = 60000
+; config_whitelist =
+; max_uri_length =
+; rewrite_limit = 100
+; x_forwarded_host = X-Forwarded-Host
+; x_forwarded_proto = X-Forwarded-Proto
+; x_forwarded_ssl = X-Forwarded-Ssl
+; Maximum allowed http request size. Applies to both clustered and local port.
+max_http_request_size = 67108864 ; 64 MB
+
+; [httpd_design_handlers]
+; _view =
+
+; [ioq]
+; concurrency = 10
+; ratio = 0.01
+
+[ssl]
+port = 6984
+
+; [chttpd_auth]
+; authentication_db = _users
+
+; [chttpd_auth_cache]
+; max_lifetime = 600000
+; max_objects =
+; max_size = 104857600
+
+; [mem3]
+; nodes_db = _nodes
+; shard_cache_size = 25000
+; shards_db = _dbs
+; sync_concurrency = 10
+
+; [fabric]
+; all_docs_concurrency = 10
+; changes_duration =
+; shard_timeout_factor = 2
+; uuid_prefix_len = 7
+
+; [rexi]
+; buffer_count = 2000
+; server_per_node = false
+
+; [global_changes]
+; max_event_delay = 25
+; max_write_delay = 500
+; update_db = true
+
+; [view_updater]
+; min_writer_items = 100
+; min_writer_size = 16777216
+
+[couch_httpd_auth]
+; WARNING! This only affects the node-local port (5986 by default).
+; You probably want the settings under [chttpd].
+authentication_db = _users
+authentication_redirect = /_utils/session.html
+require_valid_user = false
+timeout = 600 ; number of seconds before automatic logout
+auth_cache_size = 50 ; size is number of cache entries
+allow_persistent_cookies = false ; set to true to allow persistent cookies
+iterations = 10 ; iterations for password hashing
+; min_iterations = 1
+; max_iterations = 1000000000
+; password_scheme = pbkdf2
+; proxy_use_secret = false
+; comma-separated list of public fields, 404 if empty
+; public_fields =
+; secret =
+; users_db_public = false
+; cookie_domain = example.com
+
+; CSP (Content Security Policy) Support for _utils
+[csp]
+enable = true
+; header_value = default-src 'self'; img-src 'self'; font-src *; script-src
'self' 'unsafe-eval'; style-src 'self' 'unsafe-inline';
+
+[cors]
+credentials = false
+; List of origins separated by a comma, * means accept all
+; Origins must include the scheme: http://example.com
+; You can't set origins: * and credentials = true at the same time.
+;origins = *
+; List of accepted headers separated by a comma
+; headers =
+; List of accepted methods
+; methods =
+
+; Configuration for a vhost
+;[cors:http://example.com]
+; credentials = false
+; List of origins separated by a comma
+; Origins must include the scheme: http://example.com
+; You can't set origins: * and credentials = true at the same time.
+;origins =
+; List of accepted headers separated by a comma
+; headers =
+; List of accepted methods
+; methods =
+
+; Configuration for the design document cache
+;[ddoc_cache]
+; The maximum size of the cache in bytes
+;max_size = 104857600 ; 100MiB
+; The period each cache entry should wait before
+; automatically refreshing in milliseconds
+;refresh_timeout = 67000
+
+[x_frame_options]
+; Settings same-origin will return X-Frame-Options: SAMEORIGIN.
+; If same origin is set, it will ignore the hosts setting
+; same_origin = true
+; Settings hosts will return X-Frame-Options: ALLOW-FROM https://example.com/
+; List of hosts separated by a comma. * means accept all
+; hosts =
+
+[query_servers]
+javascript = ./bin/couchjs ./share/server/main.js
+coffeescript = ./bin/couchjs ./share/server/main-coffee.js
+
+; enable mango query engine
+[native_query_servers]
+query = {mango_native_proc, start_link, []}
+
+; Changing reduce_limit to false will disable reduce_limit.
+; If you think you're hitting reduce_limit with a "good" reduce function,
+; please let us know on the mailing list so we can fine tune the heuristic.
+[query_server_config]
+; commit_freq = 5
+reduce_limit = false
+os_process_limit = 100
+; os_process_idle_limit = 300
+; os_process_soft_limit = 100
+; Timeout for how long a response from a busy view group server can take.
+; "infinity" is also a valid configuration value.
+;group_info_timeout = 5000
+
+[daemons]
+index_server={couch_index_server, start_link, []}
+external_manager={couch_external_manager, start_link, []}
+query_servers={couch_proc_manager, start_link, []}
+vhosts={couch_httpd_vhost, start_link, []}
+httpd={couch_httpd, start_link, []}
+uuids={couch_uuids, start, []}
+auth_cache={couch_auth_cache, start_link, []}
+os_daemons={couch_os_daemons, start_link, []}
+compaction_daemon={couch_compaction_daemon, start_link, []}
+
+[mango]
+; Set to true to disable the "index all fields" text index, which can lead
+; to out of memory issues when users have documents with nested array fields.
+;index_all_disabled = false
+; Default limit value for mango _find queries.
+;default_limit = 25
+
+[indexers]
+couch_mrview = true
+
+[httpd_global_handlers]
+/ = {couch_httpd_misc_handlers, handle_welcome_req, <<"Welcome">>}
+favicon.ico = {couch_httpd_misc_handlers, handle_favicon_req, "./share/www"}
+
+_utils = {couch_httpd_misc_handlers, handle_utils_dir_req, "./share/www"}
+_all_dbs = {couch_httpd_misc_handlers, handle_all_dbs_req}
+_active_tasks = {couch_httpd_misc_handlers, handle_task_status_req}
+_config = {couch_httpd_misc_handlers, handle_config_req}
+_replicate = {couch_replicator_httpd, handle_req}
+_uuids = {couch_httpd_misc_handlers, handle_uuids_req}
+_restart = {couch_httpd_misc_handlers, handle_restart_req}
+_stats = {couch_stats_httpd, handle_stats_req}
+_session = {couch_httpd_auth, handle_session_req}
+_plugins = {couch_plugins_httpd, handle_req}
+_system = {chttpd_misc, handle_system_req}
+
+[httpd_db_handlers]
+_all_docs = {couch_mrview_http, handle_all_docs_req}
+_local_docs = {couch_mrview_http, handle_local_docs_req}
+_design_docs = {couch_mrview_http, handle_design_docs_req}
+_changes = {couch_httpd_db, handle_db_changes_req}
+_compact = {couch_httpd_db, handle_compact_req}
+_design = {couch_httpd_db, handle_design_req}
+_temp_view = {couch_mrview_http, handle_temp_view_req}
+_view_cleanup = {couch_mrview_http, handle_cleanup_req}
+
+; The external module takes an optional argument allowing you to narrow it to a
+; single script. Otherwise the script name is inferred from the first path
section
+; after _external's own path.
+; _mypath = {couch_httpd_external, handle_external_req, <<"mykey">>}
+; _external = {couch_httpd_external, handle_external_req}
+
+[httpd_design_handlers]
+_compact = {couch_mrview_http, handle_compact_req}
+_info = {couch_mrview_http, handle_info_req}
+_list = {couch_mrview_show, handle_view_list_req}
+_rewrite = {couch_httpd_rewrite, handle_rewrite_req}
+_show = {couch_mrview_show, handle_doc_show_req}
+_update = {couch_mrview_show, handle_doc_update_req}
+_view = {couch_mrview_http, handle_view_req}
+_view_changes = {couch_mrview_http, handle_view_changes_req}
+
+; enable external as an httpd handler, then link it with commands here.
+; note, this api is still under consideration.
+; [external]
+; mykey = /path/to/mycommand
+
+; Here you can setup commands for CouchDB to manage
+; while it is alive. It will attempt to keep each command
+; alive if it exits.
+; [os_daemons]
+; some_daemon_name = /path/to/script -with args
+; [os_daemon_settings]
+; max_retries = 3
+; retry_time = 5
+
+
+[uuids]
+; Known algorithms:
+; random - 128 bits of random awesome
+; All awesome, all the time.
+; sequential - monotonically increasing ids with random increments
+; First 26 hex characters are random. Last 6 increment in
+; random amounts until an overflow occurs. On overflow, the
+; random prefix is regenerated and the process starts over.
+; utc_random - Time since Jan 1, 1970 UTC with microseconds
+; First 14 characters are the time in hex. Last 18 are random.
+; utc_id - Time since Jan 1, 1970 UTC with microseconds, plus utc_id_suffix
string
+; First 14 characters are the time in hex. uuids/utc_id_suffix string
value is appended to these.
+algorithm = sequential
+; The utc_id_suffix value will be appended to uuids generated by the utc_id
algorithm.
+; Replicating instances should have unique utc_id_suffix values to ensure
uniqueness of utc_id ids.
+utc_id_suffix =
+# Maximum number of UUIDs retrievable from /_uuids in a single request
+max_count = 1000
+
+[attachments]
+compression_level = 8 ; from 1 (lowest, fastest) to 9 (highest, slowest), 0 to
disable compression
+compressible_types = text/*, application/javascript, application/json,
application/xml
+
+[replicator]
+; Random jitter applied on replication job startup (milliseconds)
+startup_jitter = 5000
+; Number of actively running replications
+max_jobs = 500
+;Scheduling interval in milliseconds. During each reschedule cycle
+interval = 60000
+; Maximum number of replications to start and stop during rescheduling.
+max_churn = 20
+; More worker processes can give higher network throughput but can also
+; imply more disk and network IO.
+worker_processes = 4
+; With lower batch sizes checkpoints are done more frequently. Lower batch
sizes
+; also reduce the total amount of used RAM memory.
+worker_batch_size = 500
+; Maximum number of HTTP connections per replication.
+http_connections = 20
+; HTTP connection timeout per replication.
+; Even for very fast/reliable networks it might need to be increased if a
remote
+; database is too busy.
+connection_timeout = 30000
+; Request timeout
+;request_timeout = infinity
+; If a request fails, the replicator will retry it up to N times.
+retries_per_request = 5
+; Use checkpoints
+;use_checkpoints = true
+; Checkpoint interval
+;checkpoint_interval = 30000
+; Some socket options that might boost performance in some scenarios:
+; {nodelay, boolean()}
+; {sndbuf, integer()}
+; {recbuf, integer()}
+; {priority, integer()}
+; See the `inet` Erlang module's man page for the full list of options.
+socket_options = [{keepalive, true}, {nodelay, false}]
+; Path to a file containing the user's certificate.
+;cert_file = /full/path/to/server_cert.pem
+; Path to file containing user's private PEM encoded key.
+;key_file = /full/path/to/server_key.pem
+; String containing the user's password. Only used if the private keyfile is
password protected.
+;password = somepassword
+; Set to true to validate peer certificates.
+verify_ssl_certificates = false
+; File containing a list of peer trusted certificates (in the PEM format).
+;ssl_trusted_certificates_file = /etc/ssl/certs/ca-certificates.crt
+; Maximum peer certificate depth (must be set even if certificate validation
is off).
+ssl_certificate_max_depth = 3
+; Maximum document ID length for replication.
+;max_document_id_length = 0
+; How much time to wait before retrying after a missing doc exception. This
+; exception happens if the document was seen in the changes feed, but internal
+; replication hasn't caught up yet, and fetching document's revisions
+; fails. This is a common scenario when the source is updated while continuous
+; replication is running. The retry period would depend on how quickly internal
+; replication is expected to catch up. In general this is an optimisation to
+; avoid crashing the whole replication job, which would consume more resources
+; and add log noise.
+;missing_doc_retry_msec = 2000
+
+[compaction_daemon]
+; The delay, in seconds, between each check for which database and view indexes
+; need to be compacted.
+check_interval = 300
+; If a database or view index file is smaller than this value (in bytes),
+; compaction will not happen. Very small files always have a very high
+; fragmentation therefore it's not worth to compact them.
+min_file_size = 131072
+
+[compactions]
+; List of compaction rules for the compaction daemon.
+; The daemon compacts databases and their respective view groups when all the
+; condition parameters are satisfied. Configuration can be per database or
+; global, and it has the following format:
+;
+; database_name = [ {ParamName, ParamValue}, {ParamName, ParamValue}, ... ]
+; _default = [ {ParamName, ParamValue}, {ParamName, ParamValue}, ... ]
+;
+; Possible parameters:
+;
+; * db_fragmentation - If the ratio (as an integer percentage), of the amount
+; of old data (and its supporting metadata) over the
database
; file size is equal to or greater than this value, this
+; database compaction condition is satisfied.
+; This value is computed as:
+;
+; (file_size - data_size) / file_size * 100
+;
+; The data_size and file_size values can be obtained when
+; querying a database's information URI (GET /dbname/).
+;
+; * view_fragmentation - If the ratio (as an integer percentage), of the amount
+; of old data (and its supporting metadata) over the
view
+; index (view group) file size is equal to or greater
than
+; this value, then this view index compaction condition
is
+; satisfied. This value is computed as:
+;
+; (file_size - data_size) / file_size * 100
+;
+; The data_size and file_size values can be obtained
when
+; querying a view group's information URI
+; (GET /dbname/_design/groupname/_info).
+;
+; * from _and_ to - The period for which a database (and its view groups)
compaction
+; is allowed. The value for these parameters must obey the
format:
+;
+; HH:MM - HH:MM (HH in [0..23], MM in [0..59])
+;
+; * strict_window - If a compaction is still running after the end of the
allowed
+; period, it will be canceled if this parameter is set to
'true'.
+; It defaults to 'false' and it's meaningful only if the
*period*
+; parameter is also specified.
+;
+; * parallel_view_compaction - If set to 'true', the database and its views are
+; compacted in parallel. This is only useful on
+; certain setups, like for example when the
database
+; and view index directories point to different
+; disks. It defaults to 'false'.
+;
+; Before a compaction is triggered, an estimation of how much free disk space
is
+; needed is computed. This estimation corresponds to 2 times the data size of
+; the database or view index. When there's not enough free disk space to
compact
+; a particular database or view index, a warning message is logged.
+;
+; Examples:
+;
+; 1) [{db_fragmentation, "70%"}, {view_fragmentation, "60%"}]
+; The `foo` database is compacted if its fragmentation is 70% or more.
+; Any view index of this database is compacted only if its fragmentation
+; is 60% or more.
+;
+; 2) [{db_fragmentation, "70%"}, {view_fragmentation, "60%"}, {from, "00:00"},
{to, "04:00"}]
+; Similar to the preceding example but a compaction (database or view index)
+; is only triggered if the current time is between midnight and 4 AM.
+;
+; 3) [{db_fragmentation, "70%"}, {view_fragmentation, "60%"}, {from, "00:00"},
{to, "04:00"}, {strict_window, true}]
+; Similar to the preceding example - a compaction (database or view index)
+; is only triggered if the current time is between midnight and 4 AM. If at
+; 4 AM the database or one of its views is still compacting, the compaction
+; process will be canceled.
+;
+; 4) [{db_fragmentation, "70%"}, {view_fragmentation, "60%"}, {from, "00:00"},
{to, "04:00"}, {strict_window, true}, {parallel_view_compaction, true}]
+; Similar to the preceding example, but a database and its views can be
+; compacted in parallel.
+;
+_default = [{db_fragmentation, "70%"}, {view_fragmentation, "60%"}]
+
+[log]
+; Possible log levels:
+; debug
+; info
+; notice
+; warning, warn
+; error, err
+; critical, crit
+; alert
+; emergency, emerg
+; none
+;
+level = info
+;
+; Set the maximum log message length in bytes that will be
+; passed through the writer
+;
+; max_message_size = 16000
+;
+;
+; There are three different log writers that can be configured
+; to write log messages. The default writes to stderr of the
+; Erlang VM which is useful for debugging/development as well
+; as a lot of container deployments.
+;
+; There's also a file writer that works with logrotate and an
+; rsyslog writer for deployments that need to have logs sent
+; over the network.
+;
+writer = stderr
+;
+; File Writer Options:
+;
+; The file writer will check every 30s to see if it needs
+; to reopen its file. This is useful for people that configure
+; logrotate to move log files periodically.
+;
+; file = ./couch.log ; Path name to write logs to
+;
+; Write operations will happen either every write_buffer bytes
+; or write_delay milliseconds. These are passed directly to the
+; Erlang file module with the write_delay option documented here:
+;
+; http://erlang.org/doc/man/file.html
+;
+; write_buffer = 0
+; write_delay = 0
+;
+;
+; Syslog Writer Options:
+;
+; The syslog writer options all correspond to their obvious
+; counterparts in rsyslog nomenclature.
+;
+; syslog_host =
+; syslog_port = 514
+; syslog_appid = couchdb
+; syslog_facility = local2
+
+[stats]
+; Stats collection interval in seconds. Default 10 seconds.
+;interval = 10
diff --git a/ansible/roles/couchdb/files/local.d/README
b/ansible/roles/couchdb/files/local.d/README
new file mode 100644
index 0000000000..5cc9ed123e
--- /dev/null
+++ b/ansible/roles/couchdb/files/local.d/README
@@ -0,0 +1,8 @@
+CouchDB local configuration files
+
+Files found under the etc/local.d directory that end with .ini are parsed
+within couchdb(1) at startup.
+
+This directory is intended for system administrator overrides of CouchDB
+defaults. Package maintainers should be placing overrides in the
+etc/default.d directory instead.
diff --git a/ansible/roles/couchdb/files/local.d/local.ini
b/ansible/roles/couchdb/files/local.d/local.ini
new file mode 100644
index 0000000000..c1bac9e2dc
--- /dev/null
+++ b/ansible/roles/couchdb/files/local.d/local.ini
@@ -0,0 +1,11 @@
+; CouchDB Configuration Settings
+
+; Custom settings should be made in this file. They will override settings
+; in default.ini, but unlike changes made to default.ini, this file won't be
+; overwritten on server upgrade.
+
+[chttpd]
+bind_address = any
+
+[httpd]
+bind_address = any
diff --git a/ansible/roles/couchdb/files/local.ini
b/ansible/roles/couchdb/files/local.ini
new file mode 100644
index 0000000000..cd3080ecf2
--- /dev/null
+++ b/ansible/roles/couchdb/files/local.ini
@@ -0,0 +1,113 @@
+; CouchDB Configuration Settings
+
+; Custom settings should be made in this file. They will override settings
+; in default.ini, but unlike changes made to default.ini, this file won't be
+; overwritten on server upgrade.
+
+[couchdb]
+;max_document_size = 4294967296 ; bytes
+;os_process_timeout = 5000
+
+[couch_peruser]
+; If enabled, couch_peruser ensures that a private per-user database
+; exists for each document in _users. These databases are writable only
+; by the corresponding user. Databases are in the following form:
+; userdb-{hex encoded username}
+;enable = true
+; If set to true and a user is deleted, the respective database gets
+; deleted as well.
+;delete_dbs = true
+
+[chttpd]
+;port = 5984
+;bind_address = 127.0.0.1
+; Options for the MochiWeb HTTP server.
+;server_options = [{backlog, 128}, {acceptor_pool_size, 16}]
+; For more socket options, consult Erlang's module 'inet' man page.
+;socket_options = [{recbuf, 262144}, {sndbuf, 262144}, {nodelay, true}]
+
+[httpd]
+; NOTE that this only configures the "backend" node-local port, not the
+; "frontend" clustered port. You probably don't want to change anything in
+; this section.
+; Uncomment next line to trigger basic-auth popup on unauthorized requests.
+;WWW-Authenticate = Basic realm="administrator"
+
+; Uncomment next line to set the configuration modification whitelist. Only
+; whitelisted values may be changed via the /_config URLs. To allow the admin
+; to change this value over HTTP, remember to include {httpd,config_whitelist}
+; itself. Excluding it from the list would require editing this file to update
+; the whitelist.
+;config_whitelist = [{httpd,config_whitelist}, {log,level}, {etc,etc}]
+
+[query_servers]
+;nodejs = /usr/local/bin/couchjs-node /path/to/couchdb/share/server/main.js
+
+
+[httpd_global_handlers]
+;_google = {couch_httpd_proxy, handle_proxy_req, <<"http://www.google.com">>}
+
+[couch_httpd_auth]
+; If you set this to true, you should also uncomment the WWW-Authenticate line
+; above. If you don't configure a WWW-Authenticate header, CouchDB will send
+; Basic realm="server" in order to prevent you getting logged out.
+; require_valid_user = false
+
+[os_daemons]
+; For any commands listed here, CouchDB will attempt to ensure that
+; the process remains alive. Daemons should monitor their environment
+; to know when to exit. This can most easily be accomplished by exiting
+; when stdin is closed.
+;foo = /path/to/command -with args
+
+[daemons]
+; enable SSL support by uncommenting the following line and supply the PEM's
below.
+; the default ssl port CouchDB listens on is 6984
+; httpsd = {chttpd, start_link, [https]}
+
+[ssl]
+;cert_file = /full/path/to/server_cert.pem
+;key_file = /full/path/to/server_key.pem
+;password = somepassword
+; set to true to validate peer certificates
+;verify_ssl_certificates = false
+; Set to true to fail if the client does not send a certificate. Only used if
verify_ssl_certificates is true.
+;fail_if_no_peer_cert = false
+; Path to file containing PEM encoded CA certificates (trusted
+; certificates used for verifying a peer certificate). May be omitted if
+; you do not want to verify the peer.
+;cacert_file = /full/path/to/cacertf
+; The verification fun (optional) if not specified, the default
+; verification fun will be used.
+;verify_fun = {Module, VerifyFun}
+; maximum peer certificate depth
+;ssl_certificate_max_depth = 1
+;
+; Reject renegotiations that do not live up to RFC 5746.
+;secure_renegotiate = true
+; The cipher suites that should be supported.
+; Can be specified in erlang format "{ecdhe_ecdsa,aes_128_cbc,sha256}"
+; or in OpenSSL format "ECDHE-ECDSA-AES128-SHA256".
+;ciphers = ["ECDHE-ECDSA-AES128-SHA256", "ECDHE-ECDSA-AES128-SHA"]
+; The SSL/TLS versions to support
+;tls_versions = [tlsv1, 'tlsv1.1', 'tlsv1.2']
+
+; To enable Virtual Hosts in CouchDB, add a vhost = path directive. All
requests to
+; the Virtual Host will be redirected to the path. In the example below all
requests
+; to http://example.com/ are redirected to /database.
+; If you run CouchDB on a specific port, include the port number in the vhost:
+; example.com:5984 = /database
+[vhosts]
+;example.com = /database/
+
+[update_notification]
+;unique notifier name=/full/path/to/exe -with "cmd line arg"
+
+; To create an admin account uncomment the '[admins]' section below and add a
+; line in the format 'username = password'. When you next start CouchDB, it
+; will change the password to a hash (so that your passwords don't linger
+; around in plain-text files). You can add more admin accounts with more
+; 'username = password' lines. Don't forget to restart CouchDB after
+; changing this.
+[admins]
+;admin = mysecretpassword
diff --git a/ansible/roles/couchdb/files/vm.args
b/ansible/roles/couchdb/files/vm.args
new file mode 100644
index 0000000000..7592d5c510
--- /dev/null
+++ b/ansible/roles/couchdb/files/vm.args
@@ -0,0 +1,33 @@
+# Licensed under the Apache License, Version 2.0 (the "License"); you may not
+# use this file except in compliance with the License. You may obtain a copy of
+# the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations under
+# the License.
+
+# Ensure that the Erlang VM listens on a known port
+-kernel inet_dist_listen_min 9100
+-kernel inet_dist_listen_max 9100
+
+# Tell kernel and SASL not to log anything
+-kernel error_logger silent
+-sasl sasl_error_logger false
+
+# All nodes must share the same magic cookie for distributed Erlang to work.
+# # Comment out this line if you synchronized the cookies by other means (using
+# # the ~/.erlang.cookie file, for example).
+-setcookie openwhisk
+
+# Use kernel poll functionality if supported by emulator
++K true
+
+# Start a pool of asynchronous IO threads
++A 16
+
+# Comment this line out to enable the interactive Erlang shell on startup
++Bd -noinput
diff --git a/ansible/roles/couchdb/tasks/deploy.yml
b/ansible/roles/couchdb/tasks/deploy.yml
index 0dedb7cf26..c9184e251b 100644
--- a/ansible/roles/couchdb/tasks/deploy.yml
+++ b/ansible/roles/couchdb/tasks/deploy.yml
@@ -9,23 +9,39 @@
fail: msg="The db provider in your {{ hosts_dir }}/group_vars/all is {{
db_provider }}, it has to be CouchDB, pls double check"
when: db_provider != "CouchDB"
-- name: check for persistent disk
- shell: df -h
- register: disk_status
- when: block_device is defined
-
- name: "set the volume_dir"
- vars:
- instance: "{{instances | selectattr('name', 'equalto', 'db') | list |
first}}"
set_fact:
- volume_dir: "{{ instance.volume.fsmount | default( '/mnt/' +
group_names|first, true ) }}:/usr/local/var/lib/couchdb"
- when: (block_device is defined) and (block_device in disk_status.stdout)
+ volume_dir: "{% if db_datadir is defined and db_confdir is defined %}[
'{{ db_confdir }}:/opt/couchdb/etc', '{{ db_datadir }}:/opt/couchdb/data' ]{%
elif db_datadir is undefined %}{{ db_confdir }}:/opt/couchdb/etc{% else %}{{
db_datadir }}:/opt/couchdb/data{%- endif %}"
+ when: db_datadir is defined or db_confdir is defined
+
+- name: ensure CouchDB config directory exists
+ file:
+ path: "{{ db_confdir }}"
+ state: directory
+ become: "{{ db.dir.become }}"
+ become_user: root
+ when: db_confdir is defined
+
+- name: synchronize default configuration files from local to remote in
CouchDB config directory
+ synchronize:
+ src: "files/"
+ dest: "{{ db_confdir }}"
+ delete: yes
+ recursive: yes
+ become: "{{ db.dir.become }}"
+ when: db_confdir is defined
- name: "pull the apache/couchdb:{{ couchdb.version }} image"
shell: "docker pull apache/couchdb:{{ couchdb.version }}"
retries: "{{ docker.pull.retries }}"
delay: "{{ docker.pull.delay }}"
+- name: "check configuration files"
+ shell: "ls -l {{ db_confdir }}"
+
+- name: "check configuration files"
+ shell: "cat /etc/passwd"
+
- name: (re)start CouchDB
docker_container:
name: couchdb
@@ -34,6 +50,7 @@
recreate: true
restart_policy: "{{ docker.restart.policy }}"
volumes: "{{volume_dir | default([])}}"
+ user: root
ports:
- "{{ db_port }}:5984"
- "4369:4369"
@@ -43,9 +60,36 @@
COUCHDB_PASSWORD: "{{ db_password }}"
NODENAME: "{{ ansible_host }}"
+- name: "check configuration files"
+ shell: "docker exec -i couchdb whoami"
+
+- name: "check configuration files"
+ shell: "docker exec -i couchdb id"
+ `
+- name: "check configuration files"
+ shell: "whoami"
+
+- name: "check configuration files"
+ shell: "ps -ef | grep couchdb"
+
+- name: "check logs"
+ shell: "docker logs -f couchdb"
+
+- name: wait until CouchDB in this host is up and running
+ uri:
+ url: "{{ db_protocol }}://{{ ansible_host }}:{{ db_port }}/"
+ user: "{{ db_username }}"
+ password: "{{ db_password }}"
+ register: result
+ until: result.status == 200
+ retries: 12
+ delay: 5
+
- name: wait until CouchDB in this host is up and running
uri:
url: "{{ db_protocol }}://{{ ansible_host }}:{{ db_port }}/_utils/"
+ user: "{{ db_username }}"
+ password: "{{ db_password }}"
register: result
until: result.status == 200
retries: 12
@@ -72,7 +116,7 @@
user: "{{ db_username }}"
password: "{{ db_password }}"
force_basic_auth: yes
- when: (inventory_hostname == coordinator) and (db.instances|int >= 2)
+ when: inventory_hostname == coordinator
- name: add remote nodes to the cluster
uri:
@@ -98,4 +142,4 @@
user: "{{ db_username }}"
password: "{{ db_password }}"
force_basic_auth: yes
- when: (inventory_hostname == coordinator) and (db.instances|int >= 2)
+ when: inventory_hostname == coordinator
diff --git a/tests/src/test/scala/ha/ShootComponentsTests.scala
b/tests/src/test/scala/ha/ShootComponentsTests.scala
index e3480bca86..ec56c2c4b3 100644
--- a/tests/src/test/scala/ha/ShootComponentsTests.scala
+++ b/tests/src/test/scala/ha/ShootComponentsTests.scala
@@ -110,8 +110,8 @@ class ShootComponentsTests
def isDBAlive(instance: Int): Boolean = {
require(instance >= 0 && instance < 2, "DB instance not known.")
- val host = WhiskProperties.getProperty("db.hosts").split(",")(instance)
- val port = dbPort + instance
+ val host = WhiskProperties.getDBHosts.split(",")(instance)
+ val port = WhiskProperties.getDBPort + instance
val res = ping(host, port)
res == Some(
diff --git
a/tests/src/test/scala/whisk/core/database/test/ConfigurationTests.scala
b/tests/src/test/scala/whisk/core/database/test/ConfigurationTests.scala
new file mode 100644
index 0000000000..88e6efe375
--- /dev/null
+++ b/tests/src/test/scala/whisk/core/database/test/ConfigurationTests.scala
@@ -0,0 +1,59 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package whisk.core.database.test
+
+import akka.http.scaladsl.Http
+import akka.http.scaladsl.model._
+import akka.http.scaladsl.model.headers.{Authorization, BasicHttpCredentials}
+import akka.http.scaladsl.unmarshalling.Unmarshal
+import akka.stream.ActorMaterializer
+import common.{StreamLogging, WskActorSystem}
+import org.junit.runner.RunWith
+import org.scalatest.FlatSpec
+import org.scalatest.junit.JUnitRunner
+
+import scala.concurrent.Await
+import scala.concurrent.duration.DurationInt
+
+@RunWith(classOf[JUnitRunner])
+class ConfigurationTests extends FlatSpec with DatabaseScriptTestUtils with
StreamLogging with WskActorSystem {
+
+ implicit val materializer = ActorMaterializer()
+ val authHeader = Authorization(BasicHttpCredentials(dbUsername, dbPassword))
+
+ behavior of "CouchDB Configuration"
+
+ it should "include reduce_limit as false" in {
+
+ val request = Http()
+ .singleRequest(
+ HttpRequest(
+ method = HttpMethods.GET,
+ uri =
Uri(s"${dbUrl}/_node/couchdb@${dbHost}/_config/couchdb/default_security"),
+ headers = List(authHeader)))
+ .flatMap { response =>
+ Unmarshal(response).to[String].map { resBody =>
+ withClue(s"Error in Body: $resBody")(response.status shouldBe
StatusCodes.OK)
+ resBody.trim.replace("\"", "") shouldBe "admin_only"
+ resBody
+ }
+ }
+
+ Await.result(request, 15.seconds)
+ }
+}
diff --git a/tools/travis/build.sh b/tools/travis/build.sh
index ee2750db57..e9f4c22d5a 100755
--- a/tools/travis/build.sh
+++ b/tools/travis/build.sh
@@ -46,7 +46,7 @@ fi
cd $ROOTDIR/ansible
-ANSIBLE_CMD="ansible-playbook -i environments/local -e
docker_image_prefix=testing"
+ANSIBLE_CMD="ansible-playbook -i environments/local -e
docker_image_prefix=testing -vvv"
GRADLE_PROJS_SKIP="-x :actionRuntimes:pythonAction:distDocker -x
:actionRuntimes:python2Action:distDocker -x
actionRuntimes:swift3.1.1Action:distDocker -x
actionRuntimes:swift4.1Action:distDocker -x
:actionRuntimes:javaAction:distDocker"
$ANSIBLE_CMD setup.yml -e mode=HA
----------------------------------------------------------------
This is an automated message from the Apache Git Service.
To respond to the message, please log on GitHub and use the
URL above to go to the specific comment.
For queries about this service, please contact Infrastructure at:
[email protected]
With regards,
Apache Git Services