root@ubuntu:/home/rajesh# kubectl get pods -n knative-serving
NAME READY STATUS RESTARTS AGE
activator-77cddd575c-dgvtz 0/1 CrashLoopBackOff 4 (93s ago) 11m
autoscaler-8555bc9579-wj5ph 0/1 Running 6 (49s ago) 11m
controller-756bdcdfb7-qvmjc 0/1 CrashLoopBackOff 4 (50s ago) 11m
domain-mapping-6b7d89b8b9-8cwcx 0/1 CrashLoopBackOff 4 (75s ago) 10m
domainmapping-webhook-7d8bdf476c-jf6xf 0/1 CrashLoopBackOff 7 (2m18s ago) 10m
net-istio-controller-7c5968d955-28chq 0/1 CrashLoopBackOff 4 (25s ago) 10m
net-istio-webhook-858d578f5f-dpxsr 0/1 Error 4 (2m24s ago) 10m
webhook-77ccd77dcc-kjkkg 0/1 CrashLoopBackOff 7 (2m44s ago) 10m
root@ubuntu:/home/rajesh# kubectl logs activator-77cddd575c-dgvtz -n knative-serving
2022/12/01 07:36:00 Registering 3 clients
2022/12/01 07:36:00 Registering 3 informer factories
2022/12/01 07:36:00 Registering 3 informers
root@ubuntu:/home/rajesh# kubectl describe pod activator-77cddd575c-dgvtz -n knative-serving
Events:
Type Reason Age From Message
---- ------ ---- ---- -------
Normal Scheduled 12m default-scheduler Successfully assigned knative-serving/activator-77cddd575c-dgvtz to worker-1
Warning FailedCreatePodSandBox 12m kubelet Failed to create pod sandbox: rpc error: code = Unknown desc = failed to setup network for sandbox "8ec772008e6a3648f69d14eb3d5a1f6c15143ade36cd5d54619f8f68963270cf": error getting ClusterInformation: Get "https://10.96.0.1:443/apis/crd.projectcalico.org/v1/clusterinformations/default": net/http: TLS handshake timeout
Warning FailedCreatePodSandBox 11m kubelet Failed to create pod sandbox: rpc error: code = Unknown desc = failed to setup network for sandbox "74258a75a518a38f21b8a8bc8cd1a9e2369c2be3123399699342aec7b8e1b240": error getting ClusterInformation: Get "https://10.96.0.1:443/apis/crd.projectcalico.org/v1/clusterinformations/default": net/http: TLS handshake timeout
Normal Pulling 11m kubelet Pulling image "gcr.io/knative-releases/knative.dev/serving/cmd/activator@sha256:d7f05e8bae04b1a55ab2f44735b974aa5bdcbd277f5f6f6fad6cc47864c3716f"
Normal Pulled 10m kubelet Successfully pulled image "gcr.io/knative-releases/knative.dev/serving/cmd/activator@sha256:d7f05e8bae04b1a55ab2f44735b974aa5bdcbd277f5f6f6fad6cc47864c3716f" in 57.26505714s
Normal Created 10m kubelet Created container activator
Normal Started 10m kubelet Started container activator
Warning Unhealthy 9m42s (x4 over 10m) kubelet Liveness probe failed: Get "http://192.168.1.7:8012/": dial tcp 192.168.1.7:8012: connect: connection refused
Warning Unhealthy 2m27s (x100 over 10m) kubelet Readiness probe failed: Get "http://192.168.1.7:8012/": dial tcp 192.168.1.7:8012: connect: connection refused
root@ubuntu:/home/rajesh# kubectl logs controller-756bdcdfb7-qvmjc -n knative-serving
2022/12/01 07:54:30 Registering 5 clients
2022/12/01 07:54:30 Registering 5 informer factories
2022/12/01 07:54:30 Registering 14 informers
2022/12/01 07:54:30 Registering 9 controllers
2022/12/01 07:56:01 Error reading/parsing logging configuration: timed out waiting for the condition: Get "https://10.96.0.1:443/api/v1/namespaces/knative-serving/configmaps/config-logging": dial tcp 10.96.0.1:443: i/o timeout
root@ubuntu:/home/rajesh# kubectl describe pod controller-756bdcdfb7-qvmjc -n knative-serving
Events:
Type Reason Age From Message
---- ------ ---- ---- -------
Normal Scheduled 33m default-scheduler Successfully assigned knative-serving/controller-756bdcdfb7-qvmjc to worker-1
Warning FailedMount 32m kubelet MountVolume.SetUp failed for volume "kube-api-access-9d27j" : failed to fetch token: Post "https://192.168.1.13:6443/api/v1/namespaces/knative-serving/serviceaccounts/controller/token": read tcp 192.168.1.15:38366->192.168.1.13:6443: use of closed network connection
Warning FailedMount 32m kubelet MountVolume.SetUp failed for volume "kube-api-access-9d27j" : failed to fetch token: Post "https://192.168.1.13:6443/api/v1/namespaces/knative-serving/serviceaccounts/controller/token": read tcp 192.168.1.15:46438->192.168.1.13:6443: use of closed network connection
Warning FailedMount 32m kubelet MountVolume.SetUp failed for volume "kube-api-access-9d27j" : failed to fetch token: Post "https://192.168.1.13:6443/api/v1/namespaces/knative-serving/serviceaccounts/controller/token": read tcp 192.168.1.15:59522->192.168.1.13:6443: use of closed network connection
Warning FailedMount 32m kubelet MountVolume.SetUp failed for volume "kube-api-access-9d27j" : failed to fetch token: Post "https://192.168.1.13:6443/api/v1/namespaces/knative-serving/serviceaccounts/controller/token": read tcp 192.168.1.15:33944->192.168.1.13:6443: use of closed network connection
Normal Pulling 32m kubelet Pulling image "gcr.io/knative-releases/knative.dev/serving/cmd/controller@sha256:9102224b906b702c2875a0360dbec1f073db0809faada35ffd15d1593f67552b"
Normal Pulled 31m kubelet Successfully pulled image "gcr.io/knative-releases/knative.dev/serving/cmd/controller@sha256:9102224b906b702c2875a0360dbec1f073db0809faada35ffd15d1593f67552b" in 37.833153239s
Normal Started 26m (x4 over 31m) kubelet Started container controller
Normal Created 24m (x5 over 31m) kubelet Created container controller
Normal Pulled 24m (x4 over 30m) kubelet Container image "gcr.io/knative-releases/knative.dev/serving/cmd/controller@sha256:9102224b906b702c2875a0360dbec1f073db0809faada35ffd15d1593f67552b" already present on machine
Warning BackOff 101s (x75 over 28m) kubelet Back-off restarting failed container
root@ubuntu:/home/rajesh# kubectl logs domain-mapping-6b7d89b8b9-8cwcx -n knative-serving
2022/12/01 07:53:50 Registering 4 clients
2022/12/01 07:53:50 Registering 3 informer factories
2022/12/01 07:53:50 Registering 4 informers
2022/12/01 07:53:50 Registering 1 controllers
2022/12/01 07:55:21 Error reading/parsing logging configuration: timed out waiting for the condition: Get "https://10.96.0.1:443/api/v1/namespaces/knative-serving/configmaps/config-logging": dial tcp 10.96.0.1:443: i/o timeout
root@ubuntu:/home/rajesh# kubectl describe pod domain-mapping-6b7d89b8b9-8cwcx -n knative-serving
Events:
Type Reason Age From Message
---- ------ ---- ---- -------
Normal Scheduled 34m default-scheduler Successfully assigned knative-serving/domain-mapping-6b7d89b8b9-8cwcx to worker-1
Normal Pulling 34m kubelet Pulling image "gcr.io/knative-releases/knative.dev/serving/cmd/domain-mapping@sha256:35633cb04a19f542a43b2dfd45609addec451d634372c1ee15c9ecb6a204bba4"
Normal Pulled 33m kubelet Successfully pulled image "gcr.io/knative-releases/knative.dev/serving/cmd/domain-mapping@sha256:35633cb04a19f542a43b2dfd45609addec451d634372c1ee15c9ecb6a204bba4" in 19.240964239s
Normal Created 26m (x5 over 33m) kubelet Created container domain-mapping
Normal Started 26m (x5 over 33m) kubelet Started container domain-mapping
Normal Pulled 26m (x4 over 32m) kubelet Container image "gcr.io/knative-releases/knative.dev/serving/cmd/domain-mapping@sha256:35633cb04a19f542a43b2dfd45609addec451d634372c1ee15c9ecb6a204bba4" already present on machine
Warning BackOff 3m54s (x74 over 30m) kubelet Back-off restarting failed container
Latest posts by rajeshkumar (see all)
- Gitlab Error: - March 8, 2023
- Kubernetes EKS Error: Readiness probe failed /app/grpc-health-probe -addr=:50051 - February 28, 2023
- SSL Error: no alternative certificate subject name matches target - February 24, 2023