本文记录自己使用K8S时的相关操作命令。
查看cluster-info
[root@k8s-master-1 ~]# kubectl cluster-info
Kubernetes control plane is running at https://192.168.17.130:6443
CoreDNS is running at https://192.168.17.130:6443/api/v1/namespaces/kube-system/services/kube-dns:dns/proxy
To further debug and diagnose cluster problems, use 'kubectl cluster-info dump'.
查看cluster-info的dump信息
# 显示的内容太多,故取前25行做演示
[root@k8s-master-1 ~]# kubectl cluster-info dump | head -n 25
{
"kind": "NodeList",
"apiVersion": "v1",
"metadata": {
"resourceVersion": "39548"
},
"items": [
{
"metadata": {
"name": "k8s-master-1",
"uid": "3c39ab3c-7b3c-45c7-b6f6-7625dd035a11",
"resourceVersion": "39373",
"creationTimestamp": "2021-05-04T07:11:18Z",
"labels": {
"beta.kubernetes.io/arch": "amd64",
"beta.kubernetes.io/os": "linux",
"kubernetes.io/arch": "amd64",
"kubernetes.io/hostname": "k8s-master-1",
"kubernetes.io/os": "linux",
"node-role.kubernetes.io/control-plane": "",
"node-role.kubernetes.io/master": "",
"node.kubernetes.io/exclude-from-external-load-balancers": ""
},
…………
查看pods
[root@k8s-master-1 ~]# kubectl get pods
NAME READY STATUS RESTARTS AGE
nginx-deployment-784894895c-5pvll 1/1 Running 1 22h
nginx-deployment-784894895c-vfnp7 1/1 Running 1 22h
查看deployment
[root@k8s-master-1 ~]# kubectl get deployment -n kube-system
NAME READY UP-TO-DATE AVAILABLE AGE
coredns 2/2 2 2 23h
查看services
[root@k8s-master-1 ~]# kubectl get svc
NAME TYPE CLUSTER-IP EXTERNAL-IP PORT(S) AGE
kubernetes ClusterIP 10.1.0.1 <none> 443/TCP 23h
nginx-deployment NodePort 10.1.126.251 <none> 80:30660/TCP 22h
[root@k8s-master-1 ~]# kubectl get svc -o wide
NAME TYPE CLUSTER-IP EXTERNAL-IP PORT(S) AGE SELECTOR
kubernetes ClusterIP 10.1.0.1 <none> 443/TCP 23h <none>
nginx-deployment NodePort 10.1.126.251 <none> 80:30660/TCP 22h k8s-app=nginx-deployment
[root@k8s-master-1 ~]# kubectl get svc -n kube-system -o wide
NAME TYPE CLUSTER-IP EXTERNAL-IP PORT(S) AGE SELECTOR
kube-dns ClusterIP 10.1.0.10 <none> 53/UDP,53/TCP,9153/TCP 23h k8s-app=kube-dns
查看nodes
[root@k8s-master-1 ~]# kubectl get nodes
NAME STATUS ROLES AGE VERSION
k8s-master-1 Ready control-plane,master 23h v1.21.0
k8s-node-1 Ready <none> 23h v1.21.0
k8s-node-2 Ready <none> 23h v1.21.0
[root@k8s-master-1 ~]# kubectl get nodes -o wide
NAME STATUS ROLES AGE VERSION INTERNAL-IP EXTERNAL-IP OS-IMAGE KERNEL-VERSION CONTAINER-RUNTIME
k8s-master-1 Ready control-plane,master 23h v1.21.0 192.168.17.130 <none> CentOS Linux 7 (Core) 3.10.0-1160.25.1.el7.x86_64 docker://20.10.6
k8s-node-1 Ready <none> 23h v1.21.0 192.168.17.131 <none> CentOS Linux 7 (Core) 3.10.0-1160.25.1.el7.x86_64 docker://20.10.6
k8s-node-2 Ready <none> 23h v1.21.0 192.168.17.133 <none> CentOS Linux 7 (Core) 3.10.0-1160.25.1.el7.x86_64 docker://20.10.6
查看Service Account
[root@k8s-master-1 ~]# kubectl get sa --all-namespaces
NAMESPACE NAME SECRETS AGE
default default 1 23h
kube-node-lease default 1 23h
kube-public default 1 23h
kube-system attachdetach-controller 1 23h
kube-system bootstrap-signer 1 23h
kube-system certificate-controller 1 23h
kube-system clusterrole-aggregation-controller 1 23h
kube-system coredns 1 23h
kube-system cronjob-controller 1 23h
kube-system daemon-set-controller 1 23h
kube-system default 1 23h
kube-system deployment-controller 1 23h
kube-system disruption-controller 1 23h
kube-system endpoint-controller 1 23h
kube-system endpointslice-controller 1 23h
kube-system endpointslicemirroring-controller 1 23h
kube-system ephemeral-volume-controller 1 23h
kube-system expand-controller 1 23h
kube-system flannel 1 23h
kube-system generic-garbage-collector 1 23h
kube-system horizontal-pod-autoscaler 1 23h
kube-system job-controller 1 23h
kube-system kube-proxy 1 23h
kube-system namespace-controller 1 23h
kube-system node-controller 1 23h
kube-system persistent-volume-binder 1 23h
kube-system pod-garbage-collector 1 23h
kube-system pv-protection-controller 1 23h
kube-system pvc-protection-controller 1 23h
kube-system replicaset-controller 1 23h
kube-system replication-controller 1 23h
kube-system resourcequota-controller 1 23h
kube-system root-ca-cert-publisher 1 23h
kube-system service-account-controller 1 23h
kube-system service-controller 1 23h
kube-system statefulset-controller 1 23h
kube-system token-cleaner 1 23h
kube-system ttl-after-finished-controller 1 23h
kube-system ttl-controller 1 23h
kubernetes-dashboard admin-user 1 23h
kubernetes-dashboard default 1 23h
kubernetes-dashboard kubernetes-dashboard 1 23h
查看cluster DNS Services信息
[root@k8s-master-1 ~]# kubectl get service -l k8s-app=kube-dns --namespace=kube-system
NAME TYPE CLUSTER-IP EXTERNAL-IP PORT(S) AGE
kube-dns ClusterIP 10.1.0.10 <none> 53/UDP,53/TCP,9153/TCP 23h
查看cluster DNS的Pod信息
[root@k8s-master-1 ~]# kubectl get pod --selector k8s-app=kube-dns --namespace=kube-system
NAME READY STATUS RESTARTS AGE
coredns-558bd4d5db-drmtf 1/1 Running 1 23h
coredns-558bd4d5db-pgd7r 1/1 Running 1 23h
查看components
[root@k8s-master-1 ~]# kubectl -s https://192.168.17.130:6443 get componentstatus
Warning: v1 ComponentStatus is deprecated in v1.19+
NAME STATUS MESSAGE ERROR
scheduler Unhealthy Get "http://127.0.0.1:10251/healthz": dial tcp 127.0.0.1:10251: connect: connection refused
controller-manager Unhealthy Get "http://127.0.0.1:10252/healthz": dial tcp 127.0.0.1:10252: connect: connection refused
etcd-0 Healthy {"health":"true"}
此处Unhealthy是因为kube-scheduler.yaml及kube-controller-manager.yaml默认配置了--port=0,禁用了非安全端口10251和10252,健康检查访问不到这两个端口所致;注释掉manifests中的--port=0后kubelet会自动重建相关Pod,即可恢复Healthy。
查看endpoint
[root@k8s-master-1 ~]# kubectl get endpoints
NAME ENDPOINTS AGE
kubernetes 192.168.17.130:6443 23h
nginx-deployment 10.244.1.4:30080,10.244.2.5:30080 22h
查看node 列表
[root@k8s-master-1 ~]# kubectl -s https://192.168.17.130:6443 get nodes
NAME STATUS ROLES AGE VERSION
k8s-master-1 Ready control-plane,master 23h v1.21.0
k8s-node-1 Ready <none> 23h v1.21.0
k8s-node-2 Ready <none> 23h v1.21.0
[root@k8s-master-1 ~]# kubectl get nodes
NAME STATUS ROLES AGE VERSION
k8s-master-1 Ready control-plane,master 23h v1.21.0
k8s-node-1 Ready <none> 23h v1.21.0
k8s-node-2 Ready <none> 23h v1.21.0
查看node详细信息
[root@k8s-master-1 ~]# kubectl get nodes
NAME STATUS ROLES AGE VERSION
k8s-master-1 Ready control-plane,master 23h v1.21.0
k8s-node-1 Ready <none> 23h v1.21.0
k8s-node-2 Ready <none> 23h v1.21.0
[root@k8s-master-1 ~]# kubectl describe node k8s-node-1
Name: k8s-node-1
Roles: <none>
Labels: beta.kubernetes.io/arch=amd64
beta.kubernetes.io/os=linux
kubernetes.io/arch=amd64
kubernetes.io/hostname=k8s-node-1
kubernetes.io/os=linux
Annotations: flannel.alpha.coreos.com/backend-data: {"VNI":1,"VtepMAC":"66:11:ba:1b:7b:81"}
flannel.alpha.coreos.com/backend-type: vxlan
flannel.alpha.coreos.com/kube-subnet-manager: true
flannel.alpha.coreos.com/public-ip: 192.168.17.131
kubeadm.alpha.kubernetes.io/cri-socket: /var/run/dockershim.sock
node.alpha.kubernetes.io/ttl: 0
volumes.kubernetes.io/controller-managed-attach-detach: true
CreationTimestamp: Tue, 04 May 2021 15:12:23 +0800
Taints: <none>
Unschedulable: false
Lease:
HolderIdentity: k8s-node-1
AcquireTime: <unset>
RenewTime: Wed, 05 May 2021 14:58:21 +0800
Conditions:
Type Status LastHeartbeatTime LastTransitionTime Reason Message
---- ------ ----------------- ------------------ ------ -------
NetworkUnavailable False Wed, 05 May 2021 12:51:13 +0800 Wed, 05 May 2021 12:51:13 +0800 FlannelIsUp Flannel is running on this node
MemoryPressure False Wed, 05 May 2021 14:56:21 +0800 Wed, 05 May 2021 12:50:48 +0800 KubeletHasSufficientMemory kubelet has sufficient memory available
DiskPressure False Wed, 05 May 2021 14:56:21 +0800 Wed, 05 May 2021 12:50:48 +0800 KubeletHasNoDiskPressure kubelet has no disk pressure
PIDPressure False Wed, 05 May 2021 14:56:21 +0800 Wed, 05 May 2021 12:50:48 +0800 KubeletHasSufficientPID kubelet has sufficient PID available
Ready True Wed, 05 May 2021 14:56:21 +0800 Wed, 05 May 2021 12:50:48 +0800 KubeletReady kubelet is posting ready status
Addresses:
InternalIP: 192.168.17.131
Hostname: k8s-node-1
Capacity:
cpu: 1
ephemeral-storage: 17394Mi
hugepages-1Gi: 0
hugepages-2Mi: 0
memory: 1862992Ki
pods: 110
Allocatable:
cpu: 1
ephemeral-storage: 16415037823
hugepages-1Gi: 0
hugepages-2Mi: 0
memory: 1760592Ki
pods: 110
System Info:
Machine ID: 6ab2830113da4c6eabb6a7e39800eb6b
System UUID: 6B9A4D56-D404-DC0B-087E-C3D077B94945
Boot ID: 719953b3-aa24-42c1-bcad-8d6aef62119a
Kernel Version: 3.10.0-1160.25.1.el7.x86_64
OS Image: CentOS Linux 7 (Core)
Operating System: linux
Architecture: amd64
Container Runtime Version: docker://20.10.6
Kubelet Version: v1.21.0
Kube-Proxy Version: v1.21.0
PodCIDR: 10.244.1.0/24
PodCIDRs: 10.244.1.0/24
Non-terminated Pods: (4 in total)
Namespace Name CPU Requests CPU Limits Memory Requests Memory Limits Age
--------- ---- ------------ ---------- --------------- ------------- ---
default nginx-deployment-784894895c-5pvll 0 (0%) 0 (0%) 0 (0%) 0 (0%) 22h
kube-system kube-flannel-ds-mnzwv 100m (10%) 100m (10%) 50Mi (2%) 50Mi (2%) 23h
kube-system kube-proxy-xqhr7 0 (0%) 0 (0%) 0 (0%) 0 (0%) 23h
kubernetes-dashboard dashboard-metrics-scraper-5594697f48-gn9c8 0 (0%) 0 (0%) 0 (0%) 0 (0%) 23h
Allocated resources:
(Total limits may be over 100 percent, i.e., overcommitted.)
Resource Requests Limits
-------- -------- ------
cpu 100m (10%) 100m (10%)
memory 50Mi (2%) 50Mi (2%)
ephemeral-storage 0 (0%) 0 (0%)
hugepages-1Gi 0 (0%) 0 (0%)
hugepages-2Mi 0 (0%) 0 (0%)
Events: <none>
查看kubectl配置信息(即kubeconfig,~/.kube/config)
[root@k8s-master-1 ~]# kubectl config view
apiVersion: v1
clusters:
- cluster:
certificate-authority-data: DATA+OMITTED
server: https://192.168.17.130:6443
name: kubernetes
contexts:
- context:
cluster: kubernetes
user: kubernetes-admin
name: kubernetes-admin@kubernetes
current-context: kubernetes-admin@kubernetes
kind: Config
preferences: {}
users:
- name: kubernetes-admin
user:
client-certificate-data: REDACTED
client-key-data: REDACTED
查看config(kubeadm)
[root@k8s-master-1 ~]# kubeadm config view
Command "view" is deprecated, This command is deprecated and will be removed in a future release, please use 'kubectl get cm -o yaml -n kube-system kubeadm-config' to get the kubeadm config directly.
apiServer:
extraArgs:
authorization-mode: Node,RBAC
timeoutForControlPlane: 4m0s
apiVersion: kubeadm.k8s.io/v1beta2
certificatesDir: /etc/kubernetes/pki
clusterName: kubernetes
controllerManager: {}
dns:
type: CoreDNS
etcd:
local:
dataDir: /var/lib/etcd
imageRepository: k8s.gcr.io
kind: ClusterConfiguration
kubernetesVersion: v1.21.0
networking:
dnsDomain: cluster.local
podSubnet: 10.244.0.0/16
serviceSubnet: 10.1.0.0/16
scheduler: {}
列出所需要的镜像列表
[root@k8s-master-1 ~]# kubeadm config images list
k8s.gcr.io/kube-apiserver:v1.21.0
k8s.gcr.io/kube-controller-manager:v1.21.0
k8s.gcr.io/kube-scheduler:v1.21.0
k8s.gcr.io/kube-proxy:v1.21.0
k8s.gcr.io/pause:3.4.1
k8s.gcr.io/etcd:3.4.13-0
k8s.gcr.io/coredns/coredns:v1.8.0
查看默认初始化参数配置
[root@k8s-master-1 ~]# kubeadm config print init-defaults
apiVersion: kubeadm.k8s.io/v1beta2
bootstrapTokens:
- groups:
- system:bootstrappers:kubeadm:default-node-token
token: abcdef.0123456789abcdef
ttl: 24h0m0s
usages:
- signing
- authentication
kind: InitConfiguration
localAPIEndpoint:
advertiseAddress: 1.2.3.4
bindPort: 6443
nodeRegistration:
criSocket: /var/run/dockershim.sock
name: node
taints: null
---
apiServer:
timeoutForControlPlane: 4m0s
apiVersion: kubeadm.k8s.io/v1beta2
certificatesDir: /etc/kubernetes/pki
clusterName: kubernetes
controllerManager: {}
dns:
type: CoreDNS
etcd:
local:
dataDir: /var/lib/etcd
imageRepository: k8s.gcr.io
kind: ClusterConfiguration
kubernetesVersion: 1.21.0
networking:
dnsDomain: cluster.local
serviceSubnet: 10.96.0.0/12
scheduler: {}
查看pod的日志
1、查看指定pod的日志
kubectl logs <pod_name>
kubectl logs -f <pod_name> #类似tail -f的方式查看(tail -f 命令实时查看日志文件)
如下:
[root@k8s-master-1 ~]# kubectl get pods
NAME READY STATUS RESTARTS AGE
nginx-deployment-784894895c-5pvll 1/1 Running 1 22h
nginx-deployment-784894895c-vfnp7 1/1 Running 1 22h
[root@k8s-master-1 ~]#
[root@k8s-master-1 ~]# kubectl logs nginx-deployment-784894895c-5pvll
/docker-entrypoint.sh: /docker-entrypoint.d/ is not empty, will attempt to perform configuration
/docker-entrypoint.sh: Looking for shell scripts in /docker-entrypoint.d/
/docker-entrypoint.sh: Launching /docker-entrypoint.d/10-listen-on-ipv6-by-default.sh
10-listen-on-ipv6-by-default.sh: info: Getting the checksum of /etc/nginx/conf.d/default.conf
10-listen-on-ipv6-by-default.sh: info: Enabled listen on IPv6 in /etc/nginx/conf.d/default.conf
/docker-entrypoint.sh: Launching /docker-entrypoint.d/20-envsubst-on-templates.sh
/docker-entrypoint.sh: Launching /docker-entrypoint.d/30-tune-worker-processes.sh
/docker-entrypoint.sh: Configuration complete; ready for start up
[root@k8s-master-1 ~]#
[root@k8s-master-1 ~]# kubectl logs -f nginx-deployment-784894895c-5pvll
/docker-entrypoint.sh: /docker-entrypoint.d/ is not empty, will attempt to perform configuration
/docker-entrypoint.sh: Looking for shell scripts in /docker-entrypoint.d/
/docker-entrypoint.sh: Launching /docker-entrypoint.d/10-listen-on-ipv6-by-default.sh
10-listen-on-ipv6-by-default.sh: info: Getting the checksum of /etc/nginx/conf.d/default.conf
10-listen-on-ipv6-by-default.sh: info: Enabled listen on IPv6 in /etc/nginx/conf.d/default.conf
/docker-entrypoint.sh: Launching /docker-entrypoint.d/20-envsubst-on-templates.sh
/docker-entrypoint.sh: Launching /docker-entrypoint.d/30-tune-worker-processes.sh
/docker-entrypoint.sh: Configuration complete; ready for start up
2、查看指定pod中指定容器的日志
kubectl logs <pod_name> -c <container_name>
PS:查看Docker容器日志
docker logs <container_id>
查看pod的yaml文件
kubectl get pod <pod-name> -n <ns-name> -o yaml
如下:
[root@k8s-master-1 ~]# kubectl get pod -n kube-system kube-apiserver-k8s-master-1 -o yaml
apiVersion: v1
kind: Pod
metadata:
annotations:
kubeadm.kubernetes.io/kube-apiserver.advertise-address.endpoint: 192.168.17.130:6443
kubernetes.io/config.hash: 2d2c26864f05b6ee3c5b4b229e7b8fc0
kubernetes.io/config.mirror: 2d2c26864f05b6ee3c5b4b229e7b8fc0
kubernetes.io/config.seen: "2021-05-04T15:11:21.521769538+08:00"
kubernetes.io/config.source: file
creationTimestamp: "2021-05-04T07:11:22Z"
labels:
component: kube-apiserver
tier: control-plane
name: kube-apiserver-k8s-master-1
namespace: kube-system
ownerReferences:
- apiVersion: v1
controller: true
kind: Node
name: k8s-master-1
uid: 3c39ab3c-7b3c-45c7-b6f6-7625dd035a11
resourceVersion: "31039"
uid: 923d618d-78bc-4117-8cdf-a79563827145
spec:
containers:
- command:
- kube-apiserver
- --advertise-address=192.168.17.130
- --allow-privileged=true
- --authorization-mode=Node,RBAC
- --client-ca-file=/etc/kubernetes/pki/ca.crt
- --enable-admission-plugins=NodeRestriction
- --enable-bootstrap-token-auth=true
- --etcd-cafile=/etc/kubernetes/pki/etcd/ca.crt
- --etcd-certfile=/etc/kubernetes/pki/apiserver-etcd-client.crt
- --etcd-keyfile=/etc/kubernetes/pki/apiserver-etcd-client.key
- --etcd-servers=https://127.0.0.1:2379
- --insecure-port=0
- --kubelet-client-certificate=/etc/kubernetes/pki/apiserver-kubelet-client.crt
- --kubelet-client-key=/etc/kubernetes/pki/apiserver-kubelet-client.key
- --kubelet-preferred-address-types=InternalIP,ExternalIP,Hostname
- --proxy-client-cert-file=/etc/kubernetes/pki/front-proxy-client.crt
- --proxy-client-key-file=/etc/kubernetes/pki/front-proxy-client.key
- --requestheader-allowed-names=front-proxy-client
- --requestheader-client-ca-file=/etc/kubernetes/pki/front-proxy-ca.crt
- --requestheader-extra-headers-prefix=X-Remote-Extra-
- --requestheader-group-headers=X-Remote-Group
- --requestheader-username-headers=X-Remote-User
- --secure-port=6443
- --service-account-issuer=https://kubernetes.default.svc.cluster.local
- --service-account-key-file=/etc/kubernetes/pki/sa.pub
- --service-account-signing-key-file=/etc/kubernetes/pki/sa.key
- --service-cluster-ip-range=10.1.0.0/16
- --tls-cert-file=/etc/kubernetes/pki/apiserver.crt
- --tls-private-key-file=/etc/kubernetes/pki/apiserver.key
image: k8s.gcr.io/kube-apiserver:v1.21.0
imagePullPolicy: IfNotPresent
livenessProbe:
failureThreshold: 8
httpGet:
host: 192.168.17.130
path: /livez
port: 6443
scheme: HTTPS
initialDelaySeconds: 10
periodSeconds: 10
successThreshold: 1
timeoutSeconds: 15
name: kube-apiserver
readinessProbe:
failureThreshold: 3
httpGet:
host: 192.168.17.130
path: /readyz
port: 6443
scheme: HTTPS
periodSeconds: 1
successThreshold: 1
timeoutSeconds: 15
resources:
requests:
cpu: 250m
startupProbe:
failureThreshold: 24
httpGet:
host: 192.168.17.130
path: /livez
port: 6443
scheme: HTTPS
initialDelaySeconds: 10
periodSeconds: 10
successThreshold: 1
timeoutSeconds: 15
terminationMessagePath: /dev/termination-log
terminationMessagePolicy: File
volumeMounts:
- mountPath: /etc/ssl/certs
name: ca-certs
readOnly: true
- mountPath: /etc/pki
name: etc-pki
readOnly: true
- mountPath: /etc/kubernetes/pki
name: k8s-certs
readOnly: true
dnsPolicy: ClusterFirst
enableServiceLinks: true
hostNetwork: true
nodeName: k8s-master-1
preemptionPolicy: PreemptLowerPriority
priority: 2000001000
priorityClassName: system-node-critical
restartPolicy: Always
schedulerName: default-scheduler
securityContext: {}
terminationGracePeriodSeconds: 30
tolerations:
- effect: NoExecute
operator: Exists
volumes:
- hostPath:
path: /etc/ssl/certs
type: DirectoryOrCreate
name: ca-certs
- hostPath:
path: /etc/pki
type: DirectoryOrCreate
name: etc-pki
- hostPath:
path: /etc/kubernetes/pki
type: DirectoryOrCreate
name: k8s-certs
status:
conditions:
- lastProbeTime: null
lastTransitionTime: "2021-05-04T07:11:22Z"
status: "True"
type: Initialized
- lastProbeTime: null
lastTransitionTime: "2021-05-05T04:49:53Z"
status: "True"
type: Ready
- lastProbeTime: null
lastTransitionTime: "2021-05-05T04:49:53Z"
status: "True"
type: ContainersReady
- lastProbeTime: null
lastTransitionTime: "2021-05-04T07:11:22Z"
status: "True"
type: PodScheduled
containerStatuses:
- containerID: docker://777c2a1a5458c5daa3f8250df4fe8f97ac955bc11f507f5522c2eb9196cd48bd
image: k8s.gcr.io/kube-apiserver:v1.21.0
imageID: docker-pullable://k8s.gcr.io/kube-apiserver@sha256:828fefd9598ed865d45364d1be859c87aabfa445b03b350e3440d143bd21bca9
lastState:
terminated:
containerID: docker://e1f524a825d9541b595dc7176fc1b4f1044f243522f3a79a4c30446f49a720d7
exitCode: 255
finishedAt: "2021-05-05T04:48:22Z"
reason: Error
startedAt: "2021-05-04T07:11:14Z"
name: kube-apiserver
ready: true
restartCount: 2
started: true
state:
running:
startedAt: "2021-05-05T04:49:23Z"
hostIP: 192.168.17.130
phase: Running
podIP: 192.168.17.130
podIPs:
- ip: 192.168.17.130
qosClass: Burstable
startTime: "2021-05-04T07:11:22Z"
登录容器
登录容器的时候需要注意到容器支持的shell是什么。
kubectl exec -it <pod-name> -n <ns-name> bash
kubectl exec -it <pod-name> -n <ns-name> sh
如下:
[root@k8s-master-1 ~]# kubectl get ns
NAME STATUS AGE
default Active 24h
kube-node-lease Active 24h
kube-public Active 24h
kube-system Active 24h
kubernetes-dashboard Active 24h
[root@k8s-master-1 ~]# kubectl get pods
NAME READY STATUS RESTARTS AGE
nginx-deployment-784894895c-5pvll 1/1 Running 1 23h
nginx-deployment-784894895c-vfnp7 1/1 Running 1 23h
[root@k8s-master-1 ~]# kubectl exec -it nginx-deployment-784894895c-5pvll -n default /bin/bash
kubectl exec [POD] [COMMAND] is DEPRECATED and will be removed in a future version. Use kubectl exec [POD] -- [COMMAND] instead.
root@nginx-deployment-784894895c-5pvll:/# ls
bin dev docker-entrypoint.sh home lib64 mnt proc run srv tmp var
boot docker-entrypoint.d etc lib media opt root sbin sys usr
root@nginx-deployment-784894895c-5pvll:/#
若登录时报如下错误,说明shell类型不对:
kubectl OCI runtime exec failed: exec failed: container_linux.go:345: starting container process ca
根据yaml创建资源
#根据 yaml 创建资源, apply 可以重复执行,create 不行
kubectl create -f pod.yaml
kubectl apply -f pod.yaml
根据yaml删除pod
#基于 pod.yaml 定义的名称删除 pod
kubectl delete -f pod.yaml
根据label删除pod和service
#删除所有包含某个 label 的pod 和 service
kubectl delete pod,svc -l name=<label-name>
删除pod
[root@k8s-master-1 ~]# kubectl get pods
NAME READY STATUS RESTARTS AGE
nginx-deployment-784894895c-5pvll 1/1 Running 1 23h
nginx-deployment-784894895c-vfnp7 1/1 Running 1 23h
[root@k8s-master-1 ~]# kubectl delete pod nginx-deployment-784894895c-5pvll
pod "nginx-deployment-784894895c-5pvll" deleted
[root@k8s-master-1 ~]# kubectl get pods
NAME READY STATUS RESTARTS AGE
nginx-deployment-784894895c-vfnp7 1/1 Running 1 23h
查看node或pod的资源使用率
[root@k8s-master-1 ~]# kubectl top pods
……
[root@k8s-master-1 ~]# kubectl top nodes
……
查看或编辑deployment的yaml文件
#编辑deployment的yaml文件
kubectl get deployment -n <ns-name>
kubectl edit deployment <deployment-name> -n <ns-name> -o yaml
如下:
[root@k8s-master-1 ~]# kubectl get deployment
NAME READY UP-TO-DATE AVAILABLE AGE
nginx-deployment 2/2 2 2 23h
[root@k8s-master-1 ~]# kubectl get deployment -n default
NAME READY UP-TO-DATE AVAILABLE AGE
nginx-deployment 2/2 2 2 23h
[root@k8s-master-1 ~]# kubectl get deployment -n kube-system
NAME READY UP-TO-DATE AVAILABLE AGE
coredns 2/2 2 2 24h
[root@k8s-master-1 ~]# kubectl get deployment nginx-deployment -n default -o yaml
apiVersion: apps/v1
kind: Deployment
metadata:
annotations:
deployment.kubernetes.io/revision: "1"
creationTimestamp: "2021-05-04T08:23:43Z"
generation: 1
labels:
k8s-app: nginx-deployment
name: nginx-deployment
namespace: default
resourceVersion: "31570"
uid: 47fe9d27-afc2-4f16-b2c0-8f8260e14710
spec:
progressDeadlineSeconds: 600
replicas: 2
revisionHistoryLimit: 10
selector:
matchLabels:
k8s-app: nginx-deployment
strategy:
rollingUpdate:
maxSurge: 25%
maxUnavailable: 25%
type: RollingUpdate
template:
metadata:
creationTimestamp: null
labels:
k8s-app: nginx-deployment
name: nginx-deployment
spec:
containers:
- image: nginx:1.19.8
imagePullPolicy: IfNotPresent
name: nginx-deployment
resources: {}
securityContext:
privileged: true
terminationMessagePath: /dev/termination-log
terminationMessagePolicy: File
dnsPolicy: ClusterFirst
imagePullSecrets:
- name: default-token-w79mr
restartPolicy: Always
schedulerName: default-scheduler
securityContext: {}
terminationGracePeriodSeconds: 30
status:
availableReplicas: 2
conditions:
- lastTransitionTime: "2021-05-04T08:23:43Z"
lastUpdateTime: "2021-05-04T08:24:37Z"
message: ReplicaSet "nginx-deployment-784894895c" has successfully progressed.
reason: NewReplicaSetAvailable
status: "True"
type: Progressing
- lastTransitionTime: "2021-05-05T04:52:35Z"
lastUpdateTime: "2021-05-05T04:52:35Z"
message: Deployment has minimum availability.
reason: MinimumReplicasAvailable
status: "True"
type: Available
observedGeneration: 1
readyReplicas: 2
replicas: 2
updatedReplicas: 2
[root@k8s-master-1 ~]# kubectl edit deployment nginx-deployment -n default -o yaml
进入POD
[root@k8s-master-1 ~]# kubectl get pods
NAME READY STATUS RESTARTS AGE
nginx-deployment-784894895c-5pvll 1/1 Running 1 23h
nginx-deployment-784894895c-vfnp7 1/1 Running 1 23h
[root@k8s-master-1 ~]# kubectl exec -it nginx-deployment-784894895c-5pvll bash -n default
kubectl exec [POD] [COMMAND] is DEPRECATED and will be removed in a future version. Use kubectl exec [POD] -- [COMMAND] instead.
root@nginx-deployment-784894895c-5pvll:/# pwd
/