parekhcoder commented on issue #389:
URL: https://github.com/apache/solr-operator/issues/389#issuecomment-1015482161


   Below is the output of `kubectl describe pod` for the failing solr-operator pod:
   
   > Name:         solr-operator-b5548847b-f7x7w
   Namespace:    default
   Priority:     0
   Node:         pbg-vubs92/10.10.1.183
   Start Time:   Tue, 18 Jan 2022 14:40:16 +0000
   Labels:       control-plane=solr-operator
                 istio.io/rev=default
                 pod-template-hash=b5548847b
                 security.istio.io/tlsMode=istio
                 service.istio.io/canonical-name=solr-operator
                 service.istio.io/canonical-revision=latest
   Annotations:  kubectl.kubernetes.io/default-container: solr-operator
                 kubectl.kubernetes.io/default-logs-container: solr-operator
                 prometheus.io/path: /stats/prometheus
                 prometheus.io/port: 15020
                 prometheus.io/scrape: true
                 sidecar.istio.io/status:
                   
{"initContainers":["dns-probe","istio-init"],"containers":["istio-proxy"],"volumes":["cilium-unix-sock-dir","istio-envoy","istio-data","is...
   Status:       Pending
   IP:           10.0.0.101
   IPs:
     IP:           10.0.0.101
   Controlled By:  ReplicaSet/solr-operator-b5548847b
   Init Containers:
     dns-probe:
       Container ID:  
       Image:         busybox:1.31.1
       Image ID:      
       Port:          <none>
       Host Port:     <none>
       Command:
         sh
         -c
         max=120; i=0; until nslookup kube-dns.kube-system.svc.cluster.local; 
do i=$((i + 1)); if [ $i -eq $max ]; then echo timed-out; exit 1; else sleep 1; 
fi done 
       State:          Waiting
         Reason:       CreateContainerConfigError
       Ready:          False
       Restart Count:  0
       Environment:    <none>
       Mounts:
         /var/run/secrets/kubernetes.io/serviceaccount from 
kube-api-access-ds8dx (ro)
     istio-init:
       Container ID:  
       Image:         quay.io/cilium/istio_proxy:1.10.4
       Image ID:      
       Port:          <none>
       Host Port:     <none>
       Args:
         istio-iptables
         -p
         15001
         -z
         15006
         -u
         1337
         -m
         TPROXY
         -i
         *
         -x
         
         -b
         *
         -d
         15090,15021,15020
       State:          Waiting
         Reason:       PodInitializing
       Ready:          False
       Restart Count:  0
       Limits:
         cpu:     2
         memory:  1Gi
       Requests:
         cpu:        100m
         memory:     128Mi
       Environment:  <none>
       Mounts:
         /var/run/secrets/kubernetes.io/serviceaccount from 
kube-api-access-ds8dx (ro)
   Containers:
     solr-operator:
       Container ID:  
       Image:         apache/solr-operator:v0.5.0
       Image ID:      
       Port:          8080/TCP
       Host Port:     0/TCP
       Args:
         -zk-operator=true
         --tls-skip-verify-server=true
         --tls-watch-cert=true
         --health-probe-bind-address=:8081
         --metrics-bind-address=:8080
         --leader-elect=true
       State:          Waiting
         Reason:       PodInitializing
       Ready:          False
       Restart Count:  0
       Liveness:       http-get http://:15020/app-health/solr-operator/livez 
delay=15s timeout=1s period=20s #success=1 #failure=3
       Readiness:      http-get http://:15020/app-health/solr-operator/readyz 
delay=5s timeout=1s period=10s #success=1 #failure=3
       Environment:
         POD_NAMESPACE:  default (v1:metadata.namespace)
         POD_NAME:       solr-operator-b5548847b-f7x7w (v1:metadata.name)
       Mounts:
         /var/run/secrets/kubernetes.io/serviceaccount from 
kube-api-access-ds8dx (ro)
     istio-proxy:
       Container ID:  
       Image:         quay.io/cilium/istio_proxy:1.10.4
       Image ID:      
       Port:          15090/TCP
       Host Port:     0/TCP
       Args:
         proxy
         sidecar
         --domain
         $(POD_NAMESPACE).svc.cluster.local
         --serviceCluster
         solr-operator.default
         --proxyLogLevel=warning
         --proxyComponentLogLevel=misc:error
         --log_output_level=default:info
         --concurrency
         2
       State:          Waiting
         Reason:       PodInitializing
       Ready:          False
       Restart Count:  0
       Limits:
         cpu:     2
         memory:  1Gi
       Requests:
         cpu:      100m
         memory:   128Mi
       Readiness:  http-get http://:15021/healthz/ready delay=1s timeout=3s 
period=2s #success=1 #failure=30
       Environment:
         JWT_POLICY:                    third-party-jwt
         PILOT_CERT_PROVIDER:           istiod
         CA_ADDR:                       istiod.istio-system.svc:15012
         POD_NAME:                      solr-operator-b5548847b-f7x7w 
(v1:metadata.name)
         POD_NAMESPACE:                 default (v1:metadata.namespace)
         INSTANCE_IP:                    (v1:status.podIP)
         SERVICE_ACCOUNT:                (v1:spec.serviceAccountName)
         HOST_IP:                        (v1:status.hostIP)
         CANONICAL_SERVICE:              
(v1:metadata.labels['service.istio.io/canonical-name'])
         CANONICAL_REVISION:             
(v1:metadata.labels['service.istio.io/canonical-revision'])
         PROXY_CONFIG:                  {"interceptionMode":"TPROXY"}
                                        
         ISTIO_META_POD_PORTS:          [
                                            
{"name":"metrics","containerPort":8080,"protocol":"TCP"}
                                        ]
         ISTIO_META_APP_CONTAINERS:     solr-operator
         ISTIO_META_CLUSTER_ID:         Kubernetes
         ISTIO_META_INTERCEPTION_MODE:  TPROXY
         ISTIO_METAJSON_ANNOTATIONS:    {"prometheus.io/scrape":"true"}
                                        
         ISTIO_META_WORKLOAD_NAME:      solr-operator
         ISTIO_META_OWNER:              
kubernetes://apis/apps/v1/namespaces/default/deployments/solr-operator
         ISTIO_META_MESH_ID:            cluster.local
         TRUST_DOMAIN:                  cluster.local
         ISTIO_PROMETHEUS_ANNOTATIONS:  {"scrape":"true","path":"","port":""}
         ISTIO_KUBE_APP_PROBERS:        
{"/app-health/solr-operator/livez":{"httpGet":{"path":"/healthz","port":8081,"scheme":"HTTP"},"timeoutSeconds":1},"/app-health/solr-operator/readyz":{"httpGet":{"path":"/readyz","port":8081,"scheme":"HTTP"},"timeoutSeconds":1}}
       Mounts:
         /etc/istio/pod from istio-podinfo (rw)
         /etc/istio/proxy from istio-envoy (rw)
         /var/lib/istio/data from istio-data (rw)
         /var/run/cilium from cilium-unix-sock-dir (rw)
         /var/run/secrets/istio from istiod-ca-cert (rw)
         /var/run/secrets/kubernetes.io/serviceaccount from 
kube-api-access-ds8dx (ro)
         /var/run/secrets/tokens from istio-token (rw)
   Conditions:
     Type              Status
     Initialized       False 
     Ready             False 
     ContainersReady   False 
     PodScheduled      True 
   Volumes:
     cilium-unix-sock-dir:
       Type:          HostPath (bare host directory volume)
       Path:          /var/run/cilium
       HostPathType:  
     istio-envoy:
       Type:       EmptyDir (a temporary directory that shares a pod's lifetime)
       Medium:     Memory
       SizeLimit:  <unset>
     istio-data:
       Type:       EmptyDir (a temporary directory that shares a pod's lifetime)
       Medium:     
       SizeLimit:  <unset>
     istio-podinfo:
       Type:  DownwardAPI (a volume populated by information about the pod)
       Items:
         metadata.labels -> labels
         metadata.annotations -> annotations
         limits.cpu -> cpu-limit
         requests.cpu -> cpu-request
     istio-token:
       Type:                    Projected (a volume that contains injected data 
from multiple sources)
       TokenExpirationSeconds:  43200
     istiod-ca-cert:
       Type:      ConfigMap (a volume populated by a ConfigMap)
       Name:      istio-ca-root-cert
       Optional:  false
     kube-api-access-ds8dx:
       Type:                    Projected (a volume that contains injected data 
from multiple sources)
       TokenExpirationSeconds:  3607
       ConfigMapName:           kube-root-ca.crt
       ConfigMapOptional:       <nil>
       DownwardAPI:             true
   QoS Class:                   Burstable
   Node-Selectors:              <none>
   Tolerations:                 node.kubernetes.io/not-ready:NoExecute 
op=Exists for 300s
                                node.kubernetes.io/unreachable:NoExecute 
op=Exists for 300s
   Events:
     Type     Reason            Age                 From               Message
     ----     ------            ----                ----               -------
     Warning  FailedScheduling  6m36s               default-scheduler  0/1 
nodes are available: 1 node(s) were unschedulable.
     Warning  FailedScheduling  2m (x3 over 5m30s)  default-scheduler  0/1 
nodes are available: 1 node(s) were unschedulable.
     Normal   Scheduled         45s                 default-scheduler  
Successfully assigned default/solr-operator-b5548847b-f7x7w to pbg-vubs92
     Normal   Pulled            11s (x5 over 44s)   kubelet            
Container image "busybox:1.31.1" already present on machine
     Warning  Failed            11s (x5 over 44s)   kubelet            **Error: 
container has runAsNonRoot and image will run as root** (pod: 
"solr-operator-b5548847b-f7x7w_default(c8c06a03-ead6-4248-a6e2-23ad3550193d)", 
container: dns-probe)
   


-- 
This is an automated message from the Apache Git Service.
To respond to the message, please log on to GitHub and use the
URL above to go to the specific comment.

To unsubscribe, e-mail: [email protected]

For queries about this service, please contact Infrastructure at:
[email protected]



---------------------------------------------------------------------
To unsubscribe, e-mail: [email protected]
For additional commands, e-mail: [email protected]

Reply via email to