We actually needed to rewrite master-config.yaml as shown below, removing
every ':8443' suffix from the 'public' URL fields so that they implicitly
default to :443.
# master-config.yaml (OpenShift 3.x master configuration).
# NOTE(review): indentation was flattened in the original paste; the canonical
# nesting is restored here. All keys/values are unchanged. The author's
# "<----" markers (fields where ':8443' was dropped from the public URLs so
# they default to :443 behind the nginx proxy) are kept as inline comments.
admissionConfig:
  pluginConfig:
    BuildDefaults:
      configuration:
        apiVersion: v1
        env: []
        kind: BuildDefaultsConfig
        resources:
          limits: {}
          requests: {}
    BuildOverrides:
      configuration:
        apiVersion: v1
        kind: BuildOverridesConfig
    PodPreset:
      configuration:
        apiVersion: v1
        disable: false
        kind: DefaultAdmissionConfig
    openshift.io/ImagePolicy:
      configuration:
        apiVersion: v1
        executionRules:
        - matchImageAnnotations:
          - key: images.openshift.io/deny-execution
            value: 'true'
          name: execution-denied
          onResources:
          - resource: pods
          - resource: builds
          reject: true
          skipOnResolutionFailure: true
        kind: ImagePolicyConfig
aggregatorConfig:
  proxyClientInfo:
    certFile: aggregator-front-proxy.crt
    keyFile: aggregator-front-proxy.key
apiLevels:
- v1
apiVersion: v1
assetConfig:
  extensionScripts:
  - /etc/origin/master/openshift-ansible-catalog-console.js
  logoutURL: ""
  masterPublicURL: https://hosting.wfp.org  # <---- :8443 removed
  metricsPublicURL: https://metrics.hosting.wfp.org/hawkular/metrics
  publicURL: https://hosting.wfp.org/console/  # <---- :8443 removed
  servingInfo:
    bindAddress: 0.0.0.0:8443
    bindNetwork: tcp4
    certFile: master.server.crt
    clientCA: ""
    keyFile: master.server.key
    maxRequestsInFlight: 0
    requestTimeoutSeconds: 0
authConfig:
  requestHeader:
    clientCA: front-proxy-ca.crt
    clientCommonNames:
    - aggregator-front-proxy
    extraHeaderPrefixes:
    - X-Remote-Extra-
    groupHeaders:
    - X-Remote-Group
    usernameHeaders:
    - X-Remote-User
controllerConfig:
  election:
    lockName: openshift-master-controllers
  serviceServingCert:
    signer:
      certFile: service-signer.crt
      keyFile: service-signer.key
controllers: '*'
corsAllowedOrigins:
- (?i)//127\.0\.0\.1(:|\z)
- (?i)//localhost(:|\z)
- (?i)//10\.11\.41\.85(:|\z)
- (?i)//kubernetes\.default(:|\z)
- (?i)//kubernetes\.default\.svc\.cluster\.local(:|\z)
- (?i)//kubernetes(:|\z)
- (?i)//openshift\.default(:|\z)
- (?i)//hosting\.wfp\.org(:|\z)
- (?i)//openshift\.default\.svc(:|\z)
- (?i)//172\.30\.0\.1(:|\z)
- (?i)//wfpromshap21\.global\.wfp\.org(:|\z)
- (?i)//openshift\.default\.svc\.cluster\.local(:|\z)
- (?i)//kubernetes\.default\.svc(:|\z)
- (?i)//openshift(:|\z)
dnsConfig:
  bindAddress: 0.0.0.0:8053
  bindNetwork: tcp4
etcdClientInfo:
  ca: master.etcd-ca.crt
  certFile: master.etcd-client.crt
  keyFile: master.etcd-client.key
  urls:
  - https://wfpromshap21.global.wfp.org:2379
  - https://wfpromshap22.global.wfp.org:2379
  - https://wfpromshap23.global.wfp.org:2379
etcdStorageConfig:
  kubernetesStoragePrefix: kubernetes.io
  kubernetesStorageVersion: v1
  openShiftStoragePrefix: openshift.io
  openShiftStorageVersion: v1
imageConfig:
  format: openshift/origin-${component}:${version}
  latest: false
kind: MasterConfig
kubeletClientInfo:
  ca: ca-bundle.crt
  certFile: master.kubelet-client.crt
  keyFile: master.kubelet-client.key
  port: 10250
kubernetesMasterConfig:
  apiServerArguments:
    runtime-config:
    - apis/settings.k8s.io/v1alpha1=true
    storage-backend:
    - etcd3
    storage-media-type:
    - application/vnd.kubernetes.protobuf
  controllerArguments:
  masterCount: 3
  masterIP: 10.11.41.85
  podEvictionTimeout:
  proxyClientInfo:
    certFile: master.proxy-client.crt
    keyFile: master.proxy-client.key
  schedulerArguments:
  schedulerConfigFile: /etc/origin/master/scheduler.json
  servicesNodePortRange: ""
  servicesSubnet: 172.30.0.0/16
  staticNodeNames: []
masterClients:
  externalKubernetesClientConnectionOverrides:
    acceptContentTypes: application/vnd.kubernetes.protobuf,application/json
    burst: 400
    contentType: application/vnd.kubernetes.protobuf
    qps: 200
  externalKubernetesKubeConfig: ""
  openshiftLoopbackClientConnectionOverrides:
    acceptContentTypes: application/vnd.kubernetes.protobuf,application/json
    burst: 600
    contentType: application/vnd.kubernetes.protobuf
    qps: 300
  openshiftLoopbackKubeConfig: openshift-master.kubeconfig
masterPublicURL: https://hosting.wfp.org  # <---- :8443 removed
networkConfig:
  clusterNetworkCIDR: 10.128.0.0/14
  clusterNetworks:
  - cidr: 10.128.0.0/14
    hostSubnetLength: 9
  externalIPNetworkCIDRs:
  - 0.0.0.0/0
  hostSubnetLength: 9
  networkPluginName: redhat/openshift-ovs-multitenant
  serviceNetworkCIDR: 172.30.0.0/16
oauthConfig:
  assetPublicURL: https://hosting.wfp.org/console/
  grantConfig:
    method: auto
  identityProviders:
  - challenge: true
    login: true
    mappingMethod: claim
    name: htpasswd_auth
    provider:
      apiVersion: v1
      file: /etc/origin/master/htpasswd
      kind: HTPasswdPasswordIdentityProvider
  masterCA: ca-bundle.crt
  masterPublicURL: https://hosting.wfp.org  # <---- :8443 removed
  masterURL: https://wfpromshap21.global.wfp.org:8443
  sessionConfig:
    sessionMaxAgeSeconds: 3600
    sessionName: ssn
    sessionSecretsFile: /etc/origin/master/session-secrets.yaml
  tokenConfig:
    accessTokenMaxAgeSeconds: 86400
    authorizeTokenMaxAgeSeconds: 500
pauseControllers: false
policyConfig:
  bootstrapPolicyFile: /etc/origin/master/policy.json
  openshiftInfrastructureNamespace: openshift-infra
  openshiftSharedResourcesNamespace: openshift
projectConfig:
  defaultNodeSelector: ""
  projectRequestMessage: ""
  projectRequestTemplate: ""
  securityAllocator:
    mcsAllocatorRange: s0:/2
    mcsLabelsPerProject: 5
    uidAllocatorRange: 1000000000-1999999999/10000
routingConfig:
  subdomain: hosting.wfp.org
serviceAccountConfig:
  limitSecretReferences: false
  managedNames:
  - default
  - builder
  - deployer
  masterCA: ca-bundle.crt
  privateKeyFile: serviceaccounts.private.key
  publicKeyFiles:
  - serviceaccounts.public.key
servingInfo:
  bindAddress: 0.0.0.0:8443
  bindNetwork: tcp4
  certFile: master.server.crt
  clientCA: ca.crt
  keyFile: master.server.key
  maxRequestsInFlight: 500
  requestTimeoutSeconds: 3600
volumeConfig:
  dynamicProvisioningEnabled: true
The strange PHP error message turned out to be caused by another service
already listening on port 8443 on the same host where nginx runs.
Following this post, https://github.com/openshift/origin/issues/17456, our
nginx setup now looks like this:
# Sticky load balancing across the three OpenShift masters (port 8443).
upstream openshift-cluster-webconsole {
    ip_hash;
    server wfpromshap21.global.wfp.org:8443;
    server wfpromshap22.global.wfp.org:8443;
    server wfpromshap23.global.wfp.org:8443;
}

# Redirect plain HTTP to HTTPS.
server {
    listen 10.11.40.99:80;
    server_name hosting.wfp.org;
    return 301 https://$server_name$request_uri;
}

# TLS front end proxying to the masters, with WebSocket upgrade support.
server {
    listen 10.11.40.99:443;
    server_name hosting.wfp.org;

    access_log /var/log/nginx/hosting-console-access.log;
    #access_log off;
    error_log /var/log/nginx/hosting-console-error.log crit;

    include /data/nginx/includes.d/ssl-wfp.conf;
    include /data/nginx/includes.d/error.conf;
    include /data/nginx/includes.d/proxy.conf;

    proxy_set_header Host $host;

    location / {
        proxy_pass https://openshift-cluster-webconsole;
        proxy_http_version 1.1;
        proxy_set_header Upgrade $http_upgrade;
        proxy_set_header Connection "upgrade";
    }
}
and it seems to work, nicely masking the three web consoles.
_______________________________________________
users mailing list
[email protected]
http://lists.openshift.redhat.com/openshiftmm/listinfo/users