SaketaChalamchala commented on PR #4271:
URL: https://github.com/apache/ozone/pull/4271#issuecomment-1433876843

   Similar lists from the other services:
   ```
   SCM:
   /************************************************************
   STARTUP_MSG: Starting StorageContainerManager
   ...
   STARTUP_MSG:   java = 11.0.17
   STARTUP_MSG:   conf = {adl.feature.ownerandgroup.enableupn=false, 
adl.http.timeout=-1, datanode.https.port=50475, dfs.balancer.address=0.0.0.0:0, 
dfs.balancer.block-move.timeout=0, dfs.balancer.dispatcherThreads=200, 
dfs.balancer.getBlocks.min-block-size=10485760, 
dfs.balancer.getBlocks.size=2147483648, dfs.balancer.keytab.enabled=false, 
dfs.balancer.max-iteration-time=1200000, 
dfs.balancer.max-no-move-interval=60000, 
dfs.balancer.max-size-to-move=10737418240, dfs.balancer.movedWinWidth=5400000, 
dfs.balancer.moverThreads=1000, dfs.balancer.service.interval=5m, 
dfs.balancer.service.retries.on.exception=5, dfs.batched.ls.limit=100, 
dfs.block.access.key.update.interval=600, dfs.block.access.token.enable=false, 
dfs.block.access.token.lifetime=600, ..., 
dfs.datanode.balance.bandwidthPerSec=100m, 
dfs.datanode.balance.max.concurrent.moves=100, 
dfs.datanode.block-pinning.enabled=false, 
dfs.datanode.block.id.layout.upgrade.threads=6, 
dfs.datanode.bp-ready.timeout=20, dfs.datanode.cache.revocation.polling.ms=500, 
dfs.datanode.cache.revocation.timeout.ms=900000, 
dfs.datanode.cached-dfsused.check.interval.ms=600000, 
dfs.datanode.data.dir=file://${hadoop.tmp.dir}/dfs/data, 
dfs.datanode.data.dir.perm=700, dfs.datanode.data.transfer.bandwidthPerSec=0, 
dfs.datanode.data.write.bandwidthPerSec=0, 
dfs.datanode.directoryscan.interval=21600, 
dfs.datanode.directoryscan.threads=1, 
dfs.datanode.directoryscan.throttle.limit.ms.per.sec=1000, 
dfs.datanode.disk.check.min.gap=15m, ... dfs.use.dfs.network.topology=true, 
dfs.user.home.dir.prefix=/user, 
dfs.web.authentication.kerberos.keytab=/etc/security/keytabs/HTTP.keytab, 
dfs.web.authentication.kerberos.principal=HTTP/[email protected], 
dfs.webhdfs.acl.provider.permission.pattern=^(default:)?(user|group|mask|other):[[A-Za-z_][A-Za-z0-9._-]]*:([rwx-]{3})?(,(default:)?(user|group|mask|other):[[A-Za-z_][A-Za-z0-9._-]]*:([rwx-]{3})?)*$,
 dfs.webhdfs.netty.high.watermark=65535, dfs.webhdfs.netty.low.watermark=32768, 
dfs.webhdfs.oauth2.enabled=false, 
dfs.webhdfs.rest-csrf.browser-useragents-regex=^Mozilla.*,^Opera.*, 
dfs.webhdfs.rest-csrf.custom-header=X-XSRF-HEADER, 
dfs.webhdfs.rest-csrf.enabled=false, 
dfs.webhdfs.rest-csrf.methods-to-ignore=GET,OPTIONS,HEAD,TRACE, 
dfs.webhdfs.socket.connect-timeout=60s, dfs.webhdfs.socket.read-timeout=60s, 
dfs.webhdfs.ugi.expire.after.access=600000, dfs.webhdfs.use.ipc.callq=true, 
dfs.webhdfs.user.provider.user.pattern=^[A-Za-z_][A-Za-z0-9._-]*[$]?$, 
dfs.xframe.enabled=true, dfs.xframe.value=SAMEORIGIN, file.blocksize=67108864, 
file.bytes-per-checksum=512, file.client-write-packet-size=65536, 
file.replication=1, file.stream-buffer-size=4096, 
fs.AbstractFileSystem.abfs.impl=org.apache.hadoop.fs.azurebfs.Abfs, 
fs.AbstractFileSystem.abfss.impl=org.apache.hadoop.fs.azurebfs.Abfss, 
fs.AbstractFileSystem.adl.impl=org.apache.hadoop.fs.adl.Adl, 
fs.AbstractFileSystem.file.impl=org.apache.hadoop.fs.local.LocalFs, 
fs.AbstractFileSystem.ftp.impl=org.apache.hadoop.fs.ftp.FtpFs, 
fs.AbstractFileSystem.gs.impl=com.google.cloud.hadoop.fs.gcs.GoogleHadoopFS, 
fs.AbstractFileSystem.har.impl=org.apache.hadoop.fs.HarFs, 
fs.AbstractFileSystem.hdfs.impl=org.apache.hadoop.fs.Hdfs, 
fs.AbstractFileSystem.s3a.impl=org.apache.hadoop.fs.s3a.S3A, 
fs.AbstractFileSystem.swebhdfs.impl=org.apache.hadoop.fs.SWebHdfs, 
fs.AbstractFileSystem.viewfs.impl=org.apache.hadoop.fs.viewfs.ViewFs, 
fs.AbstractFileSystem.wasb.impl=org.apache.hadoop.fs.azure.Wasb, 
fs.AbstractFileSystem.wasbs.impl=org.apache.hadoop.fs.azure.Wasbs, 
fs.AbstractFileSystem.webhdfs.impl=org.apache.hadoop.fs.WebHdfs, 
fs.abfs.impl=org.apache.hadoop.fs.azurebfs.AzureBlobFileSystem, 
fs.abfss.impl=org.apache.hadoop.fs.azurebfs.SecureAzureBlobFileSystem, 
fs.adl.impl=org.apache.hadoop.fs.adl.AdlFileSystem, 
fs.adl.oauth2.access.token.provider.type=<redacted>, ...
       , ozone.scm.names=scm, 
ozone.scm.network.topology.schema.file=network-topology-default.xml, 
ozone.scm.pipeline.allocated.timeout=5m, 
ozone.scm.pipeline.creation.auto.factor.one=true, 
ozone.scm.pipeline.creation.interval=30s, 
ozone.scm.pipeline.destroy.timeout=66s, ozone.scm.pipeline.leader-choose.policy=
         
org.apache.hadoop.hdds.scm.pipeline.leader.choose.algorithms.MinLeaderCountChoosePolicy
       , ozone.scm.pipeline.owner.container.count=1, 
ozone.scm.pipeline.per.metadata.disk=2, ozone.scm.pipeline.scrub.interval=5m, 
ozone.scm.ratis.pipeline.limit=0, ozone.scm.ratis.port=9894, 
ozone.scm.security.handler.count.key=2, 
ozone.scm.security.service.bind.host=0.0.0.0, 
ozone.scm.security.service.port=9961, ozone.scm.sequence.id.batch.size=1000, 
ozone.scm.skip.bootstrap.validation=false, ozone.scm.stale.node.interval=30s, 
ozone.scm.update.client.crl.check.interval=600s, 
ozone.scm.update.service.port=9893, ozone.security.enabled=true, 
ozone.security.http.kerberos.enabled=true, ozone.server.default.replication=3, 
ozone.server.default.replication.type=RATIS, 
ozone.service.shutdown.timeout=60s, ozone.snapshot.filtering.limit.per.task=2, 
ozone.snapshot.filtering.service.interval=1m, 
ozone.sst.filtering.service.timeout=300000ms, 
ozone.tags.system=OZONE,MANAGEMENT,SECURITY,PERFORMANCE,DEBUG,CLIENT,SERVER,OM,SCM,
         
CRITICAL,RATIS,CONTAINER,REQUIRED,REST,STORAGE,PIPELINE,STANDALONE,S3GATEWAY,TOKEN,TLS,RECON,
 ozone.trace.enabled=false, recon.om.delta.update.limit=2000, 
recon.om.delta.update.loop.limit=10, rpc.metrics.percentiles.intervals=60,300, 
rpc.metrics.quantile.enable=true, rpc.metrics.timeunit=MILLISECONDS, 
scm.container.client.idle.threshold=10s, scm.container.client.max.size=256, 
seq.io.sort.factor=100, seq.io.sort.mb=100, tfile.fs.input.buffer.size=262144, 
tfile.fs.output.buffer.size=262144, tfile.io.chunk.size=1048576}
   ```
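
   Side note on the `<redacted>` value above: `fs.adl.oauth2.access.token.provider.type` matches the default `hadoop.security.sensitive-config-keys` patterns (e.g. `oauth.*token`), so Hadoop's `ConfigRedactor` masks it before logging. A minimal sketch of how a dump like this could be assembled with redaction applied (the `dumpConf` helper and exact formatting are illustrative, not necessarily what this PR does):
   ```java
   import java.util.Map;
   import java.util.TreeMap;
   import org.apache.hadoop.conf.ConfigRedactor;
   import org.apache.hadoop.conf.Configuration;

   public class StartupConfDump {

     /**
      * Illustrative helper: render every effective key in sorted order,
      * masking sensitive values via ConfigRedactor (which is driven by the
      * hadoop.security.sensitive-config-keys patterns).
      */
     static String dumpConf(Configuration conf) {
       ConfigRedactor redactor = new ConfigRedactor(conf);
       // Configuration iterates in no particular order; sort to match the
       // alphabetical listing seen in the startup logs above.
       Map<String, String> sorted = new TreeMap<>();
       for (Map.Entry<String, String> e : conf) {
         sorted.put(e.getKey(), redactor.redact(e.getKey(), e.getValue()));
       }
       StringBuilder sb = new StringBuilder("{");
       String sep = "";
       for (Map.Entry<String, String> e : sorted.entrySet()) {
         sb.append(sep).append(e.getKey()).append('=').append(e.getValue());
         sep = ", ";
       }
       return sb.append('}').toString();
     }

     public static void main(String[] args) {
       Configuration conf = new Configuration();
       conf.set("fs.adl.oauth2.access.token.provider.type", "ClientCredential");
       // Under the default sensitive-key patterns this should print the key
       // as fs.adl.oauth2.access.token.provider.type=<redacted>.
       System.out.println("STARTUP_MSG:   conf = " + dumpConf(conf));
     }
   }
   ```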
   
   ```
   DN: 
   /************************************************************
   STARTUP_MSG: Starting HddsDatanodeService
   ...
   STARTUP_MSG:   java = 11.0.17
   STARTUP_MSG:   conf = {adl.feature.ownerandgroup.enableupn=false, 
adl.http.timeout=-1, datanode.https.port=50475, dfs.balancer.address=0.0.0.0:0, 
dfs.balancer.block-move.timeout=0, dfs.balancer.dispatcherThreads=200, 
dfs.balancer.getBlocks.min-block-size=10485760, 
dfs.balancer.getBlocks.size=2147483648, dfs.balancer.keytab.enabled=false, 
dfs.balancer.max-iteration-time=1200000, 
dfs.balancer.max-no-move-interval=60000, 
dfs.balancer.max-size-to-move=10737418240, dfs.balancer.movedWinWidth=5400000, 
dfs.balancer.moverThreads=1000, dfs.balancer.service.interval=5m, ..., 
fs.adl.oauth2.access.token.provider.type=<redacted>, fs.automatic.close=true, 
fs.azure.authorization=false, fs.azure.authorization.caching.enable=true, 
fs.azure.buffer.dir=${hadoop.tmp.dir}/abfs, fs.azure.local.sas.key.mode=false, 
fs.azure.sas.expiry.period=90d, 
fs.azure.saskey.usecontainersaskeyforallaccess=true, 
fs.azure.secure.mode=false, fs.azure.user.agent.prefix=unknown, 
fs.client.resolve.remote.symlinks=true, 
fs.client.resolve.topology.enabled=false, fs.defaultFS=ofs://om, 
fs.df.interval=60000, fs.du.interval=600000, 
fs.ftp.data.connection.mode=ACTIVE_LOCAL_DATA_CONNECTION_MODE, 
fs.ftp.host=0.0.0.0, fs.ftp.host.port=21, 
fs.ftp.impl=org.apache.hadoop.fs.ftp.FTPFileSystem, fs.ftp.timeout=0, 
fs.ftp.transfer.mode=BLOCK_TRANSFER_MODE, fs.getspaceused.jitterMillis=60000, 
fs.har.impl.disable.cache=true, fs.permissions.umask-mode=022, 
fs.s3a.accesspoint.required=false, ..., 
ozone.scm.heartbeat.log.warn.interval.count=10, 
ozone.scm.heartbeat.rpc-retry-count=15, 
ozone.scm.heartbeat.rpc-retry-interval=1s, ozone.scm.heartbeat.rpc-timeout=5s, 
ozone.scm.heartbeat.thread.interval=3s, ozone.scm.http-address=scm:9876, 
ozone.scm.http-bind-host=0.0.0.0, ozone.scm.http.enabled=true, 
ozone.scm.https-address=0.0.0.0:9877, ozone.scm.https-bind-host=0.0.0.0, 
ozone.scm.info.wait.duration=10m, 
ozone.scm.keyvalue.container.deletion-choosing.policy=
         
org.apache.hadoop.ozone.container.common.impl.TopNOrderedContainerDeletionChoosingPolicy
       , ozone.scm.names=scm, 
ozone.scm.network.topology.schema.file=network-topology-default.xml, 
ozone.scm.pipeline.allocated.timeout=5m, 
ozone.scm.pipeline.creation.auto.factor.one=true, 
ozone.scm.pipeline.creation.interval=30s, 
ozone.scm.pipeline.destroy.timeout=66s, ozone.scm.pipeline.leader-choose.policy=
         
org.apache.hadoop.hdds.scm.pipeline.leader.choose.algorithms.MinLeaderCountChoosePolicy
       , ozone.scm.pipeline.owner.container.count=1, 
ozone.scm.pipeline.per.metadata.disk=2, ozone.scm.pipeline.scrub.interval=5m, 
ozone.scm.ratis.pipeline.limit=0, ozone.scm.ratis.port=9894, 
ozone.scm.security.handler.count.key=2, 
ozone.scm.security.service.bind.host=0.0.0.0, 
ozone.scm.security.service.port=9961, ozone.scm.sequence.id.batch.size=1000, 
ozone.scm.skip.bootstrap.validation=false, ozone.scm.stale.node.interval=30s, 
ozone.scm.update.client.crl.check.interval=600s, 
ozone.scm.update.service.port=9893, ozone.security.enabled=true, 
ozone.security.http.kerberos.enabled=true, ozone.server.default.replication=3, 
ozone.server.default.replication.type=RATIS, 
ozone.service.shutdown.timeout=60s, ozone.snapshot.filtering.limit.per.task=2, 
ozone.snapshot.filtering.service.interval=1m, 
ozone.sst.filtering.service.timeout=300000ms, 
ozone.tags.system=OZONE,MANAGEMENT,SECURITY,PERFORMANCE,DEBUG,CLIENT,SERVER,OM,SCM,
         
CRITICAL,RATIS,CONTAINER,REQUIRED,REST,STORAGE,PIPELINE,STANDALONE,S3GATEWAY,TOKEN,TLS,RECON,
 ozone.trace.enabled=false, recon.om.delta.update.limit=2000, 
recon.om.delta.update.loop.limit=10, rpc.metrics.percentiles.intervals=60,300, 
rpc.metrics.quantile.enable=true, rpc.metrics.timeunit=MILLISECONDS, 
scm.container.client.idle.threshold=10s, scm.container.client.max.size=256, 
seq.io.sort.factor=100, seq.io.sort.mb=100, tfile.fs.input.buffer.size=262144, 
tfile.fs.output.buffer.size=262144, tfile.io.chunk.size=1048576}
   ```
   
   ```
   S3G: 
   /************************************************************
   ...
   STARTUP_MSG:   java = 11.0.17
   STARTUP_MSG:   conf = {adl.feature.ownerandgroup.enableupn=false, 
adl.http.timeout=-1, bind.address=0.0.0.0, datanode.https.port=50475, 
dfs.balancer.address=0.0.0.0:0, dfs.balancer.block-move.timeout=0, 
dfs.balancer.dispatcherThreads=200, 
dfs.balancer.getBlocks.min-block-size=10485760, 
dfs.balancer.getBlocks.size=2147483648, dfs.balancer.keytab.enabled=false, 
dfs.balancer.max-iteration-time=1200000, 
dfs.balancer.max-no-move-interval=60000, 
dfs.balancer.max-size-to-move=10737418240, dfs.balancer.movedWinWidth=5400000, 
dfs.balancer.moverThreads=1000, dfs.balancer.service.interval=5m, 
dfs.balancer.service.retries.on.exception=5, dfs.batched.ls.limit=100, 
dfs.block.access.key.update.interval=600, dfs.block.access.token.enable=false, 
dfs.block.access.token.lifetime=600, ..., 
fs.adl.oauth2.access.token.provider.type=<redacted>, fs.automatic.close=true, 
fs.azure.authorization=false, fs.azure.authorization.caching.enable=true, 
fs.azure.buffer.dir=${hadoop.tmp.dir}/abfs, fs.azure.local.sas.key.mode=false, 
fs.azure.sas.expiry.period=90d, 
fs.azure.saskey.usecontainersaskeyforallaccess=true, 
fs.azure.secure.mode=false, fs.azure.user.agent.prefix=unknown, 
fs.client.resolve.remote.symlinks=true, 
fs.client.resolve.topology.enabled=false, fs.defaultFS=ofs://om, 
fs.df.interval=60000, fs.du.interval=600000, 
fs.ftp.data.connection.mode=ACTIVE_LOCAL_DATA_CONNECTION_MODE, 
fs.ftp.host=0.0.0.0, fs.ftp.host.port=21, 
fs.ftp.impl=org.apache.hadoop.fs.ftp.FTPFileSystem, fs.ftp.timeout=0, 
fs.ftp.transfer.mode=BLOCK_TRANSFER_MODE, fs.getspaceused.jitterMillis=60000, 
fs.har.impl.disable.cache=true, fs.permissions.umask-mode=022, 
fs.s3a.accesspoint.required=false, 
fs.s3a.assumed.role.credentials.provider=org.apache.hadoop.fs.s3a.SimpleAWSCredentialsProvider,
 fs.s3a.assumed.role.session.duration=30m, fs.s3a.attempts.maximum=20, 
fs.s3a.aws.credentials.provider=
       org.apache.hadoop.fs.s3a.TemporaryAWSCredentialsProvider,
       org.apache.hadoop.fs.s3a.SimpleAWSCredentialsProvider,
       com.amazonaws.auth.EnvironmentVariableCredentialsProvider,
       org.apache.hadoop.fs.s3a.auth.IAMInstanceCredentialsProvider
     , fs.s3a.block.size=32M, fs.s3a.buffer.dir=${hadoop.tmp.dir}/s3a, 
fs.s3a.change.detection.mode=server, fs.s3a.change.detection.source=etag, 
fs.s3a.change.detection.version.required=true, 
fs.s3a.committer.abort.pending.uploads=true, 
fs.s3a.committer.magic.enabled=true, fs.s3a.committer.name=file, 
fs.s3a.committer.staging.conflict-mode=append, 
fs.s3a.committer.staging.tmp.path=tmp/staging, 
fs.s3a.committer.staging.unique-filenames=true, fs.s3a.committer.threads=8, 
fs.s3a.connection.establish.timeout=5000, fs.s3a.connection.maximum=96, 
fs.s3a.connection.request.timeout=0, ..., ozone.scm.ratis.pipeline.limit=0, 
ozone.scm.ratis.port=9894, ozone.scm.security.handler.count.key=2, 
ozone.scm.security.service.bind.host=0.0.0.0, 
ozone.scm.security.service.port=9961, ozone.scm.sequence.id.batch.size=1000, 
ozone.scm.skip.bootstrap.validation=false, ozone.scm.stale.node.interval=30s, 
ozone.scm.update.client.crl.check.interval=600s, 
ozone.scm.update.service.port=9893, ozone.security.enabled=true, 
ozone.security.http.kerberos.enabled=true, 
ozone.server.default.replication=3, 
ozone.server.default.replication.type=RATIS, 
ozone.service.shutdown.timeout=60s, ozone.snapshot.filtering.limit.per.task=2, 
ozone.snapshot.filtering.service.interval=1m, 
ozone.sst.filtering.service.timeout=300000ms, 
ozone.tags.system=OZONE,MANAGEMENT,SECURITY,PERFORMANCE,DEBUG,CLIENT,SERVER,OM,SCM,
         
CRITICAL,RATIS,CONTAINER,REQUIRED,REST,STORAGE,PIPELINE,STANDALONE,S3GATEWAY,TOKEN,TLS,RECON,
 ozone.trace.enabled=false, recon.om.delta.update.limit=2000, 
recon.om.delta.update.loop.limit=10, rpc.metrics.percentiles.intervals=60,300, 
rpc.metrics.quantile.enable=true, rpc.metrics.timeunit=MILLISECONDS, 
scm.container.client.idle.threshold=10s, scm.container.client.max.size=256, 
seq.io.sort.factor=100, seq.io.sort.mb=100, tfile.fs.input.buffer.size=262144, 
tfile.fs.output.buffer.size=262144, tfile.io.chunk.size=1048576}
   ```
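
   One readability nit visible in several of these dumps: multi-line XML values (e.g. `fs.s3a.aws.credentials.provider` above, or `ozone.scm.pipeline.leader-choose.policy` in the SCM list) are printed with their embedded whitespace, which scatters a single entry across lines. `Configuration` already has trimming accessors if we ever want to normalize that; a small sketch (illustrative only, not something this PR needs to change):
   ```java
   import org.apache.hadoop.conf.Configuration;

   public class MultiLineValues {
     public static void main(String[] args) {
       Configuration conf = new Configuration();
       // Simulate an XML value written across several indented lines.
       conf.set("fs.s3a.aws.credentials.provider",
           "\n      org.apache.hadoop.fs.s3a.TemporaryAWSCredentialsProvider,\n"
           + "      org.apache.hadoop.fs.s3a.SimpleAWSCredentialsProvider\n    ");

       // Plain get() preserves the whitespace, which is why the startup dumps
       // above show such values spread over multiple indented lines.
       System.out.println(conf.get("fs.s3a.aws.credentials.provider"));

       // getTrimmedStrings() splits the value on commas and trims each
       // element, so the same value can be re-joined onto a single line.
       System.out.println(String.join(",",
           conf.getTrimmedStrings("fs.s3a.aws.credentials.provider")));
     }
   }
   ```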
   
   ```
   Ozone CSI:
   sh-4.2$ ozone csi -conf=/etc/hadoop/x --verbose
   ...
   /************************************************************
   STARTUP_MSG: Starting CsiServer
   ...
   STARTUP_MSG:   java = 11.0.17
   STARTUP_MSG:   conf = {adl.feature.ownerandgroup.enableupn=false, 
adl.http.timeout=-1, datanode.https.port=50475, dfs.balancer.address=0.0.0.0:0, 
dfs.balancer.block-move.timeout=0, dfs.balancer.dispatcherThreads=200, 
dfs.balancer.getBlocks.min-block-size=10485760, 
dfs.balancer.getBlocks.size=2147483648, dfs.balancer.keytab.enabled=false, 
dfs.balancer.max-iteration-time=1200000, 
dfs.balancer.max-no-move-interval=60000, 
dfs.balancer.max-size-to-move=10737418240, dfs.balancer.movedWinWidth=5400000, 
dfs.balancer.moverThreads=1000, dfs.balancer.service.interval=5m, ..., 
fs.adl.oauth2.access.token.provider.type=<redacted>, fs.automatic.close=true, 
fs.azure.authorization=false, fs.azure.authorization.caching.enable=true, 
fs.azure.buffer.dir=${hadoop.tmp.dir}/abfs, fs.azure.local.sas.key.mode=false, 
fs.azure.sas.expiry.period=90d, 
fs.azure.saskey.usecontainersaskeyforallaccess=true, 
fs.azure.secure.mode=false, fs.azure.user.agent.prefix=unknown, 
fs.client.resolve.remote.symlinks=true, 
fs.client.resolve.topology.enabled=false, fs.defaultFS=ofs://om, 
fs.df.interval=60000, fs.du.interval=600000, 
fs.ftp.data.connection.mode=ACTIVE_LOCAL_DATA_CONNECTION_MODE, 
fs.ftp.host=0.0.0.0, fs.ftp.host.port=21, 
fs.ftp.impl=org.apache.hadoop.fs.ftp.FTPFileSystem, fs.ftp.timeout=0, 
fs.ftp.transfer.mode=BLOCK_TRANSFER_MODE, fs.getspaceused.jitterMillis=60000, 
fs.har.impl.disable.cache=true, fs.permissions.umask-mode=022, 
fs.s3a.accesspoint.required=false, 
fs.s3a.assumed.role.credentials.provider=org.apache.hadoop.fs.s3a.SimpleAWSCredentialsProvider,
 fs.s3a.assumed.role.session.duration=30m, fs.s3a.attempts.maximum=20, 
fs.s3a.aws.credentials.provider=
       org.apache.hadoop.fs.s3a.TemporaryAWSCredentialsProvider,
       org.apache.hadoop.fs.s3a.SimpleAWSCredentialsProvider,
       com.amazonaws.auth.EnvironmentVariableCredentialsProvider,
       org.apache.hadoop.fs.s3a.auth.IAMInstanceCredentialsProvider
     , fs.s3a.block.size=32M, fs.s3a.buffer.dir=${hadoop.tmp.dir}/s3a, 
fs.s3a.change.detection.mode=server, fs.s3a.change.detection.source=etag, 
fs.s3a.change.detection.version.required=true, ..., 
ozone.scm.security.service.port=9961, ozone.scm.sequence.id.batch.size=1000, 
ozone.scm.skip.bootstrap.validation=false, ozone.scm.stale.node.interval=30s, 
ozone.security.enabled=true, ozone.security.http.kerberos.enabled=true, 
ozone.server.default.replication=3, 
ozone.server.default.replication.type=RATIS, 
ozone.service.shutdown.timeout=60s, ozone.snapshot.filtering.limit.per.task=2, 
ozone.snapshot.filtering.service.interval=1m, 
ozone.sst.filtering.service.timeout=300000ms, 
ozone.tags.system=OZONE,MANAGEMENT,SECURITY,PERFORMANCE,DEBUG,CLIENT,SERVER,OM,SCM,
         
CRITICAL,RATIS,CONTAINER,REQUIRED,REST,STORAGE,PIPELINE,STANDALONE,S3GATEWAY,TOKEN,TLS,RECON,
 ozone.trace.enabled=false, recon.om.delta.update.limit=2000, 
recon.om.delta.update.loop.limit=10, rpc.metrics.percentiles.intervals=60,300, 
rpc.metrics.quantile.enable=true, rpc.metrics.timeunit=MILLISECONDS, 
scm.container.client.idle.threshold=10s, scm.container.client.max.size=256, 
seq.io.sort.factor=100, seq.io.sort.mb=100, tfile.fs.input.buffer.size=262144, 
tfile.fs.output.buffer.size=262144, tfile.io.chunk.size=1048576}
   ```
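
   For completeness, a quick standalone way to sanity-check the masking these logs suggest, assuming the dumps go through Hadoop's `ConfigRedactor` (hypothetical snippet, not a test from this PR):
   ```java
   import org.apache.hadoop.conf.ConfigRedactor;
   import org.apache.hadoop.conf.Configuration;

   public class RedactionCheck {
     public static void main(String[] args) {
       // new Configuration() picks up the default
       // hadoop.security.sensitive-config-keys patterns from core-default.xml.
       Configuration conf = new Configuration();
       ConfigRedactor redactor = new ConfigRedactor(conf);

       // A key matching an oauth/token pattern should be masked.
       System.out.println(redactor.redact(
           "fs.adl.oauth2.access.token.provider.type", "ClientCredential"));
       // expected: <redacted>

       // A non-sensitive key should pass through unchanged.
       System.out.println(redactor.redact("dfs.datanode.data.dir.perm", "700"));
       // expected: 700
     }
   }
   ```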

