I wrote these notes a long time ago and never got around to publishing them. Articles like this are a dime a dozen by now, but they may still be worth a look.
apt install -y bash-completion   # Debian/Ubuntu
yum install -y bash-completion   # CentOS
echo "source <(kubectl completion bash)" >> ~/.bashrc
echo "source <(kubeadm completion bash)" >> ~/.bashrc
source ~/.bashrc
modprobe ip_vs && modprobe ip_vs_rr && modprobe ip_vs_wrr && modprobe ip_vs_sh
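# These modprobe calls don't survive a reboot. A minimal sketch to load the
# modules at boot via systemd (nf_conntrack_ipv4 added here as a commonly
# required companion for IPVS mode; adjust to your kernel):
cat > /etc/modules-load.d/ipvs.conf << EOF
ip_vs
ip_vs_rr
ip_vs_wrr
ip_vs_sh
nf_conntrack_ipv4
EOF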
# Clean up leftovers from a previous installation before re-running kubeadm
rm -rf /var/lib/cni/
rm -rf /var/lib/kubelet/*
rm -rf /etc/cni/
ip link del cni0 && ip link del flannel.1 && ip link del kube-ipvs0
# Optional sysctl setting to disable IPv6
net.ipv6.conf.all.disable_ipv6 = 1
# Unmount anything still mounted under /var/lib/kubelet
awk '$2 ~ path {print $2}' path=/var/lib/kubelet /proc/mounts | xargs -r umount
kubeadm init --kubernetes-version 1.11.1 --pod-network-cidr=10.244.0.0/16
kubectl apply -f https://raw.githubusercontent.com/coreos/flannel/v0.10.0/Documentation/kube-flannel.yml
kubectl taint nodes --all node-role.kubernetes.io/master-
kubectl run nginx --image=nginx:alpine --port=80 --replicas 1
kubectl expose deployment nginx --type=NodePort
kubectl run curl --image=vsxen/k8s --port=5201 -i --tty
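# Quick check, assuming the commands above succeeded: look up the assigned
# NodePort, then curl the Service; inside the curl pod the name nginx
# resolves via cluster DNS
kubectl get svc nginx        # e.g. 80:3XXXX/TCP
curl http://nginx            # run inside the curl pod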
TL;DR
#!/bin/bash
set -x
set -e
curl -fsSL https://get.docker.com | bash -s docker --mirror Aliyun
cat > /etc/docker/daemon.json << EOF
{
"registry-mirrors": ["https://docker.mirrors.ustc.edu.cn", "https://2h3po24q.mirror.aliyuncs.com"],
"max-concurrent-downloads": 10
"exec-opts": ["native.cgroupdriver=systemd"],
"log-driver": "json-file",
"log-opts": {
"max-size": "100m",
"max-file": "10"
}
}
EOF
systemctl restart docker
cat <<EOF > /etc/sysctl.d/k8s.conf
net.bridge.bridge-nf-call-ip6tables = 1
net.bridge.bridge-nf-call-iptables = 1
EOF
sysctl --system
# CentOS:
cat <<EOF > /etc/yum.repos.d/kubernetes.repo
[kubernetes]
name=Kubernetes
baseurl=http://mirrors.aliyun.com/kubernetes/yum/repos/kubernetes-el7-x86_64
enabled=1
gpgcheck=0
repo_gpgcheck=0
gpgkey=http://mirrors.aliyun.com/kubernetes/yum/doc/yum-key.gpg
       http://mirrors.aliyun.com/kubernetes/yum/doc/rpm-package-key.gpg
EOF
yum install -y kubelet-1.11.1-0 kubeadm-1.11.1-0 kubectl-1.11.1-0 cri-tools-1.11.1-0
# Debian/Ubuntu alternative:
apt-get update && apt-get install -y apt-transport-https
curl https://mirrors.aliyun.com/kubernetes/apt/doc/apt-key.gpg | apt-key add -
cat <<EOF >/etc/apt/sources.list.d/kubernetes.list
deb https://mirrors.aliyun.com/kubernetes/apt/ kubernetes-xenial main
EOF
apt-get update
apt-get install -y kubeadm=1.11.1-00 kubelet=1.11.1-00 kubernetes-cni=0.6.0-00 cri-tools=1.11.1-00
KUBE_VERSION=v1.14.1
KUBE_PAUSE_VERSION=3.1
ETCD_VERSION=3.3.10
DNS_VERSION=1.3.1
GCR_URL=k8s.gcr.io
HUB_URL=mirrorgooglecontainers
images=(kube-proxy:${KUBE_VERSION}
kube-scheduler:${KUBE_VERSION}
kube-controller-manager:${KUBE_VERSION}
kube-apiserver:${KUBE_VERSION}
pause:${KUBE_PAUSE_VERSION}
etcd:${ETCD_VERSION})
for imageName in "${images[@]}"; do
docker pull $HUB_URL/$imageName
docker tag $HUB_URL/$imageName $GCR_URL/$imageName
docker rmi $HUB_URL/$imageName
done
docker pull coredns/coredns:${DNS_VERSION}
docker tag coredns/coredns:${DNS_VERSION} $GCR_URL/coredns:${DNS_VERSION}
docker rmi coredns/coredns:${DNS_VERSION}
docker pull jmgao1983/flannel:v0.10.0-amd64
docker tag jmgao1983/flannel:v0.10.0-amd64 quay.io/coreos/flannel:v0.10.0-amd64
docker rmi jmgao1983/flannel:v0.10.0-amd64
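# Sanity check (optional): confirm the re-tagged images are in place
# before running kubeadm init
docker images | grep -E 'k8s.gcr.io|flannel'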
systemctl enable docker.service
kubeadm init --kubernetes-version ${KUBE_VERSION} --pod-network-cidr=10.244.0.0/16 --apiserver-advertise-address=<node-ip>
kubectl apply -f https://raw.githubusercontent.com/coreos/flannel/v0.10.0/Documentation/kube-flannel.yml
Step-by-step installation
Install Docker
apt-get update
apt-get install -y apt-transport-https ca-certificates curl software-properties-common
curl -fsSL https://download.docker.com/linux/ubuntu/gpg | apt-key add -
add-apt-repository "deb https://download.docker.com/linux/$(. /etc/os-release; echo "$ID") $(lsb_release -cs) stable"
apt-get update && apt-get install -y docker-ce=$(apt-cache madison docker-ce | grep 17.03 | head -1 | awk '{print $3}')
yum install -y yum-utils device-mapper-persistent-data lvm2
yum-config-manager --add-repo http://mirrors.aliyun.com/docker-ce/linux/centos/docker-ce.repo
yum list docker-ce.x86_64 --showduplicates |sort -r
Configure a proxy for Docker
SSIP=192.168.12.100
sudo mkdir -p /etc/systemd/system/docker.service.d
cat > /etc/systemd/system/docker.service.d/http-proxy.conf <<EOF
[Service]
Environment="HTTP_PROXY=$SSIP:1080/"
EOF
cat > /etc/docker/daemon.json << EOF
{
"registry-mirrors": ["https://docker.mirrors.ustc.edu.cn", "https://2h3po24q.mirror.aliyuncs.com"],
"max-concurrent-downloads": 10
}
EOF
systemctl daemon-reload && systemctl restart docker
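To confirm that Docker actually picked up the proxy and the registry mirrors (docker info lists HTTP Proxy and Registry Mirrors when they are set):
systemctl show --property=Environment docker
docker info | grep -iE -A2 'proxy|mirrors'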
Install kubeadm
apt-get update && apt-get install -y apt-transport-https curl
curl -s https://packages.cloud.google.com/apt/doc/apt-key.gpg | apt-key add -
# Using a proxy
sudo vi /etc/apt/apt.conf.d/proxy.conf
Acquire::https::Proxy "http://192.168.12.100:1080";
Acquire::https::Proxy "http://user:password@proxy.server:port/";
SSIP=192.168.12.102
curl --socks5 $SSIP:1080 -s https://packages.cloud.google.com/apt/doc/apt-key.gpg | apt-key add -
export http_proxy=socks5://$SSIP:1080
# apt picks the proxy up from the environment from here on; no extra proxy config needed
cat <<EOF >/etc/apt/sources.list.d/kubernetes.list
deb http://apt.kubernetes.io/ kubernetes-xenial main
EOF
apt-get update
apt-get install -y kubelet kubeadm kubectl
yum list kubeadm --showduplicates
yum install -y kubelet-1.10.7-0 kubeadm-1.10.7-0 kubectl-1.10.7-0 cri-tools-1.10.7-0
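Whichever way the packages were installed, it is worth pinning the version so a routine system upgrade does not move the cluster components unexpectedly (a sketch; the yum variant needs the yum-plugin-versionlock package):
apt-mark hold kubelet kubeadm kubectl                                                # Debian/Ubuntu
yum install -y yum-plugin-versionlock && yum versionlock add kubelet kubeadm kubectl # CentOS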
Startup parameters kubeadm adds for the kubelet
cat /etc/systemd/system/kubelet.service.d/10-kubeadm.conf
# Note: This dropin only works with kubeadm and kubelet v1.11+
[Service]
Environment="KUBELET_KUBECONFIG_ARGS=--bootstrap-kubeconfig=/etc/kubernetes/bootstrap-kubelet.conf --kubeconfig=/etc/kubernetes/kubelet.conf"
Environment="KUBELET_CONFIG_ARGS=--config=/var/lib/kubelet/config.yaml"
# This is a file that "kubeadm init" and "kubeadm join" generates at runtime, populating the KUBELET_KUBEADM_ARGS variable dynamically
EnvironmentFile=-/var/lib/kubelet/kubeadm-flags.env
# This is a file that the user can use for overrides of the kubelet args as a last resort. Preferably, the user should use
# the .NodeRegistration.KubeletExtraArgs object in the configuration files instead. KUBELET_EXTRA_ARGS should be sourced from this file.
EnvironmentFile=-/etc/default/kubelet
ExecStart=
ExecStart=/usr/bin/kubelet $KUBELET_KUBECONFIG_ARGS $KUBELET_CONFIG_ARGS $KUBELET_KUBEADM_ARGS $KUBELET_EXTRA_ARGS
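Per the comments in the unit above, /etc/default/kubelet is the last-resort override hook. For example, to pin the kubelet to a specific node IP (illustrative flag and address):
echo 'KUBELET_EXTRA_ARGS=--node-ip=192.168.200.140' > /etc/default/kubelet
systemctl daemon-reload && systemctl restart kubelet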
Preparation
Disable swap and the firewall; with swap on, kubeadm aborts with [ERROR Swap]: running with swap on is not supported. Please disable swap.
swapoff -a
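# swapoff -a lasts only until the next reboot; to make it permanent,
# also comment the swap entry out of /etc/fstab (double-check the file afterwards)
sed -i '/ swap / s/^/#/' /etc/fstab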
If you configured a proxy earlier, unset it now (unset http_proxy); otherwise the kubelet fails with errors like:
Jul 26 09:52:34 ubuntu kubelet[3336]: E0726 09:52:34.391184 3336 reflector.go:205] k8s.io/kubernetes/pkg/kubelet/kubelet.go:455: Failed to list *v1.Service: Get https://192.168.200.140:6443/api/v1/services?limit=500&resourceVersion=0: net/http: TLS handshake timeout
Jul 26 09:52:34 ubuntu kubelet[3336]: E0726 09:52:34.400787 3336 reflector.go:205] k8s.io/kubernetes/pkg/kubelet/config/apiserver.go:47: Failed to list *v1.Pod: Get https://192.168.200.140:6443/api/v1/pods?fieldSelector=spec.nodeName%3Dubuntu&limit=500&resourceVersion=0: net/http: TLS handshake timeout
Jul 26 09:52:34 ubuntu kubelet[3336]: E0726 09:52:34.403551 3336 reflector.go:205] k8s.io/kubernetes/pkg/kubelet/kubelet.go:464: Failed to list *v1.Node: Get https://192.168.200.140:6443/api/v1/nodes?fieldSelector=metadata.name%3Dubuntu&limit=500&resourceVersion=0: net/http: TLS handshake timeout
Jul 26 09:52:35 ubuntu kubelet[3336]: W0726 09:52:35.846994 3336 cni.go:172] Unable to update cni config: No networks found in /etc/cni/net.d
Jul 26 09:52:35 ubuntu kubelet[3336]: E0726 09:52:35.847929 3336 kubelet.go:2110] Container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:docker: network plugin is not ready: cni config uninitialized
Jul 26 09:52:37 ubuntu kubelet[3336]: E0726 09:52:37.408007 3336 kubelet_node_status.go:103] Unable to register node "ubuntu" with API server: Post https://192.168.200.140:6443/api/v1/nodes: net/http: TLS handshake timeout
Initialize the static Pods
# Use the CIDR that the flannel manifest expects by default
kubeadm init --kubernetes-version 1.11.0 --pod-network-cidr=10.244.0.0/16
You can now join any number of machines by running the following on each node
as root:
kubeadm join 192.168.200.140:6443 --token e12o5m.ablnnhbsojrl1g5t --discovery-token-ca-cert-hash sha256:ad11adbac1f2a9f963c4bb8c88dcf6dfa4c330ad79bef59cae7613388cfe6413
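The token in that join command expires after 24 hours by default. A fresh one, including the CA cert hash, can be generated on the master at any time:
kubeadm token create --print-join-command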
Files generated under /etc/kubernetes
├── admin.conf
├── controller-manager.conf
├── kubelet.conf
├── manifests
│   ├── etcd.yaml
│   ├── kube-apiserver.yaml
│   ├── kube-controller-manager.yaml
│   └── kube-scheduler.yaml
├── pki
│   ├── apiserver.crt
│   ├── apiserver-etcd-client.crt
│   ├── apiserver-etcd-client.key
│   ├── apiserver.key
│   ├── apiserver-kubelet-client.crt
│   ├── apiserver-kubelet-client.key
│   ├── ca.crt
│   ├── ca.key
│   ├── etcd
│   │   ├── ca.crt
│   │   ├── ca.key
│   │   ├── healthcheck-client.crt
│   │   ├── healthcheck-client.key
│   │   ├── peer.crt
│   │   ├── peer.key
│   │   ├── server.crt
│   │   └── server.key
│   ├── front-proxy-ca.crt
│   ├── front-proxy-ca.key
│   ├── front-proxy-client.crt
│   ├── front-proxy-client.key
│   ├── sa.key
│   └── sa.pub
└── scheduler.conf
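The certificates can be inspected with openssl, which is handy for checking expiry dates and the SANs baked into the apiserver cert:
openssl x509 -in /etc/kubernetes/pki/apiserver.crt -noout -dates
openssl x509 -in /etc/kubernetes/pki/apiserver.crt -noout -text | grep -A1 'Subject Alternative Name'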
Component startup parameters
manifests/etcd.yaml
- etcd
- --advertise-client-urls=https://127.0.0.1:2379
- --cert-file=/etc/kubernetes/pki/etcd/server.crt
- --client-cert-auth=true
- --data-dir=/var/lib/etcd
- --initial-advertise-peer-urls=https://127.0.0.1:2380
- --initial-cluster=ubuntu=https://127.0.0.1:2380
- --key-file=/etc/kubernetes/pki/etcd/server.key
- --listen-client-urls=https://127.0.0.1:2379
- --listen-peer-urls=https://127.0.0.1:2380
- --name=ubuntu
- --peer-cert-file=/etc/kubernetes/pki/etcd/peer.crt
- --peer-client-cert-auth=true
- --peer-key-file=/etc/kubernetes/pki/etcd/peer.key
- --peer-trusted-ca-file=/etc/kubernetes/pki/etcd/ca.crt
- --snapshot-count=10000
- --trusted-ca-file=/etc/kubernetes/pki/etcd/ca.crt
image: k8s.gcr.io/etcd-amd64:3.2.18
manifests/kube-apiserver.yaml
spec:
containers:
- command:
- kube-apiserver
- --authorization-mode=Node,RBAC
- --advertise-address=192.168.200.140
- --allow-privileged=true
- --client-ca-file=/etc/kubernetes/pki/ca.crt
- --disable-admission-plugins=PersistentVolumeLabel
- --enable-admission-plugins=NodeRestriction
- --enable-bootstrap-token-auth=true
- --etcd-cafile=/etc/kubernetes/pki/etcd/ca.crt
- --etcd-certfile=/etc/kubernetes/pki/apiserver-etcd-client.crt
- --etcd-keyfile=/etc/kubernetes/pki/apiserver-etcd-client.key
- --etcd-servers=https://127.0.0.1:2379
- --insecure-port=0
- --kubelet-client-certificate=/etc/kubernetes/pki/apiserver-kubelet-client.crt
- --kubelet-client-key=/etc/kubernetes/pki/apiserver-kubelet-client.key
- --kubelet-preferred-address-types=InternalIP,ExternalIP,Hostname
- --proxy-client-cert-file=/etc/kubernetes/pki/front-proxy-client.crt
- --proxy-client-key-file=/etc/kubernetes/pki/front-proxy-client.key
- --requestheader-allowed-names=front-proxy-client
- --requestheader-client-ca-file=/etc/kubernetes/pki/front-proxy-ca.crt
- --requestheader-extra-headers-prefix=X-Remote-Extra-
- --requestheader-group-headers=X-Remote-Group
- --requestheader-username-headers=X-Remote-User
- --secure-port=6443
- --service-account-key-file=/etc/kubernetes/pki/sa.pub
- --service-cluster-ip-range=10.96.0.0/12
- --tls-cert-file=/etc/kubernetes/pki/apiserver.crt
- --tls-private-key-file=/etc/kubernetes/pki/apiserver.key
image: k8s.gcr.io/kube-apiserver-amd64:v1.11.0
manifests/kube-controller-manager.yaml
spec:
containers:
- command:
- kube-controller-manager
- --address=127.0.0.1
- --allocate-node-cidrs=true
- --cluster-cidr=10.244.0.0/16
- --cluster-signing-cert-file=/etc/kubernetes/pki/ca.crt
- --cluster-signing-key-file=/etc/kubernetes/pki/ca.key
- --controllers=*,bootstrapsigner,tokencleaner
- --kubeconfig=/etc/kubernetes/controller-manager.conf
- --leader-elect=true
- --node-cidr-mask-size=24
- --root-ca-file=/etc/kubernetes/pki/ca.crt
- --service-account-private-key-file=/etc/kubernetes/pki/sa.key
- --use-service-account-credentials=true
image: k8s.gcr.io/kube-controller-manager-amd64:v1.11.0
manifests/kube-scheduler.yaml
spec:
containers:
- command:
- kube-scheduler
- --address=127.0.0.1
- --kubeconfig=/etc/kubernetes/scheduler.conf
- --leader-elect=true
image: k8s.gcr.io/kube-scheduler-amd64:v1.11.0
Check the status
kubectl -n kube-system get po -o wide
NAME                             READY   STATUS    RESTARTS   AGE   IP                NODE
coredns-78fcdf6894-495rm         0/1     Pending   0          15m   <none>            <none>
coredns-78fcdf6894-qvfvf         0/1     Pending   0          15m   <none>            <none>
etcd-ubuntu                      1/1     Running   0          15m   192.168.200.140   ubuntu
kube-apiserver-ubuntu            1/1     Running   0          14m   192.168.200.140   ubuntu
kube-controller-manager-ubuntu   1/1     Running   0          14m   192.168.200.140   ubuntu
kube-proxy-csr55                 1/1     Running   0          15m   192.168.200.140   ubuntu
kube-scheduler-ubuntu            1/1     Running   0          14m   192.168.200.140   ubuntu
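The coredns pods stay Pending until a CNI plugin is deployed; after applying the flannel manifest from above they should move to Running:
kubectl apply -f https://raw.githubusercontent.com/coreos/flannel/v0.10.0/Documentation/kube-flannel.yml
kubectl -n kube-system get po -w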
Custom configuration
This probably only applies to version 1.14.
apiVersion: kubeadm.k8s.io/v1beta1
kind: InitConfiguration
bootstrapTokens:
- groups:
  - system:bootstrappers:kubeadm:default-node-token
  #token: abcdef.0123456789abcdef
  ttl: 24h0m0s
  usages:
  - signing
  - authentication
nodeRegistration:
  criSocket: /var/run/dockershim.sock
  name: east1-monitor1
  taints:
  - effect: NoSchedule
    key: node-role.kubernetes.io/master
---
apiVersion: kubeadm.k8s.io/v1beta1
kind: ClusterConfiguration
apiServer:
  timeoutForControlPlane: 4m0s
  extraArgs:
    audit-log-maxbackup: "10"
    audit-log-maxsize: "100"
    audit-log-path: "/var/log/kubernetes/kubernetes.audit"
    audit-log-maxage: "7"
    audit-policy-file: "/etc/kubernetes/audit.yml"
  extraVolumes:
  - name: "audit"
    hostPath: "/etc/kubernetes/audit.yml"
    mountPath: "/etc/kubernetes/audit.yml"
    writable: true
    pathType: File
  - name: "log"
    hostPath: "/var/log/kubernetes"
    mountPath: "/var/log/kubernetes"
    writable: true
    pathType: Directory
certificatesDir: /etc/kubernetes/pki
clusterName: kubernetes
controlPlaneEndpoint: ""
imageRepository: k8s.gcr.io
kubernetesVersion: v1.14.0
controllerManager: {}
scheduler: {}
dns:
  type: CoreDNS
etcd:
  local:
    dataDir: /var/lib/etcd
networking:
  dnsDomain: cluster.local
  podSubnet: "10.244.0.0/16"
  serviceSubnet: 10.96.0.0/12
---
apiVersion: kubeproxy.config.k8s.io/v1alpha1
kind: KubeProxyConfiguration
ipvs:
  excludeCIDRs: null
  minSyncPeriod: 1s
  scheduler: "rr"
  syncPeriod: 30s
mode: ipvs
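Save the three documents above into one file, say kubeadm.yaml, and hand it to init (on 1.14, kubeadm config print init-defaults can also generate a starting template):
kubeadm init --config kubeadm.yaml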
It has to be said that kubeadm dramatically lowers the difficulty of bootstrapping a k8s cluster. Next, I will dig through the init logs to work out what kubeadm actually does on our behalf.
Master
Preflight checks come first, covering IPs, routes, ports, commands, and images. kubeadm then generates the certificates and the corresponding static Pods and waits for them to start; that part of the log is too verbose to reproduce here. Next it stores the kubeadm config in a ConfigMap and uploads the kubelet configuration, so that joining nodes can pick their settings up from the cluster.
I0403 11:30:25.053837 25657 uploadconfig.go:109][upload-config] Uploading the kubeadm ClusterConfiguration to a ConfigMap
[upload-config] storing the configuration used in ConfigMap "kubeadm-config" in the "kube-system" Namespace
I0403 11:30:25.056662 25657 round_trippers.go:438] GET https://192.168.1.11:6443/api/v1/namespaces/kube-system/configmaps/kubeadm-config 404 Not Found in 1 milliseconds
I0403 11:30:25.061844 25657 round_trippers.go:438] POST https://192.168.1.11:6443/api/v1/namespaces/kube-system/configmaps 201 Created in 4 milliseconds
I0403 11:30:25.070763 25657 uploadconfig.go:123] [upload-config] Uploading the kubelet component config to a ConfigMap
[kubelet] Creating a ConfigMap "kubelet-config-1.13" in namespace kube-system with the configuration for the kubelets in the cluster
I0403 11:30:25.074150 25657 round_trippers.go:438] POST https://192.168.1.11:6443/api/v1/namespaces/kube-system/configmaps 201 Created in 2 milliseconds
It creates the kubeadm:nodes-kubeadm-config Role, granting system:nodes and system:bootstrappers:kubeadm:default-node-token read access to the kubeadm-config ConfigMap; reading this config is the first step when a node joins.
# I0403 11:30:25.065774 25657 round_trippers.go:438] POST https://192.168.1.11:6443/apis/rbac.authorization.k8s.io/v1/namespaces/kube-system/roles 201 Created in 3 milliseconds
kind: Role
apiVersion: rbac.authorization.k8s.io/v1
metadata:
  name: kubeadm:nodes-kubeadm-config
  namespace: kube-system
rules:
- verbs:
  - get
  apiGroups:
  - ''
  resources:
  - configmaps
  resourceNames:
  - kubeadm-config
# I0403 11:30:25.069802 25657 round_trippers.go:438] POST https://192.168.1.11:6443/apis/rbac.authorization.k8s.io/v1/namespaces/kube-system/rolebindings 201 Created in 3 milliseconds
kind: RoleBinding
apiVersion: rbac.authorization.k8s.io/v1
metadata:
  name: kubeadm:nodes-kubeadm-config
  namespace: kube-system
subjects:
- kind: Group
  apiGroup: rbac.authorization.k8s.io
  name: system:bootstrappers:kubeadm:default-node-token
- kind: Group
  apiGroup: rbac.authorization.k8s.io
  name: system:nodes
roleRef:
  apiGroup: rbac.authorization.k8s.io
  kind: Role
  name: kubeadm:nodes-kubeadm-config
It then adds the kubeadm:kubelet-config-1.14 Role, granting system:nodes and system:bootstrappers:kubeadm:default-node-token read access to the kubelet-config-1.14 ConfigMap, i.e. the kubelet configuration.
# I0403 11:30:25.077100 25657 round_trippers.go:438] POST https://192.168.1.11:6443/apis/rbac.authorization.k8s.io/v1/namespaces/kube-system/roles 201 Created in 2 milliseconds
kind: Role
apiVersion: rbac.authorization.k8s.io/v1
metadata:
  name: kubeadm:kubelet-config-1.14
  namespace: kube-system
rules:
- verbs:
  - get
  apiGroups:
  - ''
  resources:
  - configmaps
  resourceNames:
  - kubelet-config-1.14
---
# I0403 11:30:25.079638 25657 round_trippers.go:438] POST https://192.168.1.11:6443/apis/rbac.authorization.k8s.io/v1/namespaces/kube-system/rolebindings 201 Created in 2 milliseconds
---
kind: RoleBinding
apiVersion: rbac.authorization.k8s.io/v1
metadata:
  name: kubeadm:kubelet-config-1.14
  namespace: kube-system
subjects:
- kind: Group
  apiGroup: rbac.authorization.k8s.io
  name: system:nodes
- kind: Group
  apiGroup: rbac.authorization.k8s.io
  name: system:bootstrappers:kubeadm:default-node-token
roleRef:
  apiGroup: rbac.authorization.k8s.io
  kind: Role
  name: kubeadm:kubelet-config-1.14
Register the node and mark it unschedulable (the master taint).
I0403 11:30:25.079743 25657 uploadconfig.go:128][upload-config] Preserving the CRISocket information for the control-plane node
I0403 11:30:25.079755 25657 patchnode.go:30] [patchnode] Uploading the CRI Socket information "/var/run/dockershim.sock" to the Node API object "east1-monitor1" as an annotation
I0403 11:30:25.582567 25657 round_trippers.go:438] GET https://192.168.1.11:6443/api/v1/nodes/east1-monitor1 200 OK in 2 milliseconds
I0403 11:30:25.590791 25657 round_trippers.go:438] PATCH https://192.168.1.11:6443/api/v1/nodes/east1-monitor1 200 OK in 4 milliseconds
[upload-certs] Skipping phase. Please see --experimental-upload-certs
[mark-control-plane] Marking the node east1-monitor1 as control-plane by adding the label "node-role.kubernetes.io/master=''"
[mark-control-plane] Marking the node east1-monitor1 as control-plane by adding the taints [node-role.kubernetes.io/master:NoSchedule]
I0403 11:30:26.093669 25657 round_trippers.go:438] GET https://192.168.1.11:6443/api/v1/nodes/east1-monitor1 200 OK in 2 milliseconds
I0403 11:30:26.100968 25657 round_trippers.go:438] PATCH https://192.168.1.11:6443/api/v1/nodes/east1-monitor1 200 OK in 4 milliseconds
Create the bootstrap token and store it in the cluster as a Secret; anyone presenting it is placed in the system:bootstrappers:kubeadm:default-node-token group (see auth-extra-groups below).
#[bootstrap-token] Using token: cr3n6a.5ndgdhxeyinfq7i0
#[bootstrap-token] Configuring bootstrap tokens, cluster-info ConfigMap, RBAC Roles
#I0403 11:30:26.103085 25657 round_trippers.go:438] GET https://192.168.1.11:6443/api/v1/namespaces/kube-system/secrets/bootstrap-token-cr3n6a 404 Not Found in 1 milliseconds
#I0403 11:30:26.106375 25657 round_trippers.go:438] POST https://192.168.1.11:6443/api/v1/namespaces/kube-system/secrets 201 Created in 2 milliseconds
---
kind: Secret
apiVersion: v1
metadata:
  name: bootstrap-token-o6k5na
  namespace: kube-system
  creationTimestamp:
data:
  auth-extra-groups: c3lzdGVtOmJvb3RzdHJhcHBlcnM6a3ViZWFkbTpkZWZhdWx0LW5vZGUtdG9rZW4=
  description: VGhlIGRlZmF1bHQgYm9vdHN0cmFwIHRva2VuIGdlbmVyYXRlZCBieSAna3ViZWFkbSBpbml0Jy4=
  expiration: MjAxOS0wNC0wOVQxOTozNjoyMCswODowMA==
  token-id: bzZrNW5h
  token-secret: ZzQzd3A1dHkzajJ5ZWxmMA==
  usage-bootstrap-authentication: dHJ1ZQ==
  usage-bootstrap-signing: dHJ1ZQ==
type: bootstrap.kubernetes.io/token
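The data fields are plain base64; token-id and token-secret joined with a dot form the join token (decoding the values above):
echo bzZrNW5h | base64 -d                    # o6k5na
echo ZzQzd3A1dHkzajJ5ZWxmMA== | base64 -d    # g43wp5ty3j2yelf0
# together: o6k5na.g43wp5ty3j2yelf0, the <token-id>.<token-secret> used by kubeadm join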
# [bootstrap-token] configured RBAC rules to allow Node Bootstrap tokens to post CSRs in order for nodes to get long term certificate credentials
#I0403 11:30:26.110092 25657 round_trippers.go:438] POST https://192.168.1.11:6443/apis/rbac.authorization.k8s.io/v1/clusterrolebindings 201 Created in 2 milliseconds
---
kind: ClusterRoleBinding
apiVersion: rbac.authorization.k8s.io/v1
metadata:
  name: kubeadm:kubelet-bootstrap
subjects:
- kind: Group
  apiGroup: rbac.authorization.k8s.io
  name: system:bootstrappers:kubeadm:default-node-token
roleRef:
  apiGroup: rbac.authorization.k8s.io
  kind: ClusterRole
  name: system:node-bootstrapper
# grants [create get list watch] on certificatesigningrequests.certificates.k8s.io
#[bootstrap-token] configured RBAC rules to allow certificate rotation for all node client certificates in the cluster
#I0403 11:30:26.115312 25657 round_trippers.go:438] POST https://192.168.1.11:6443/apis/rbac.authorization.k8s.io/v1/clusterrolebindings 201 Created in 2 milliseconds
---
kind: ClusterRoleBinding
apiVersion: rbac.authorization.k8s.io/v1
metadata:
  name: kubeadm:node-autoapprove-certificate-rotation
  creationTimestamp:
subjects:
- kind: Group
  name: system:nodes
roleRef:
  apiGroup: rbac.authorization.k8s.io
  kind: ClusterRole
  name: system:certificates.k8s.io:certificatesigningrequests:selfnodeclient
Generate the information kubectl cluster-info needs and store it in a ConfigMap; nodes also read the CA information from here when joining.
# [bootstrap-token] creating the "cluster-info" ConfigMap in the "kube-public" namespace
#I0403 11:30:26.115417 25657 clusterinfo.go:46][bootstrap-token] loading admin kubeconfig
---
kind: ConfigMap
apiVersion: v1
metadata:
  name: cluster-info
  namespace: kube-public
  creationTimestamp:
data:
  kubeconfig: |
    apiVersion: v1
    clusters:
    - cluster:
        certificate-authority-data: LS0tLS1CRUdJTiBDRVJUSUZJQ0FURS0tLS0tCk1JSUN5RENDQWJDZ0F3SUJBZ0lCQURBTkJna3Foa2lHOXcwQkFRc0ZBREFWTVJNd0VRWURWUVFERXdwcmRXSmwKY201bGRHVnpNQjRYRFRFNU1EUXdPREV4TXpVMU5Gb1hEVEk1TURRd05URXhNelUxTkZvd0ZURVRNQkVHQTFVRQpBeE1LYTNWaVpYSnVaWFJsY3pDQ0FTSXdEUVlKS29aSWh2Y05BUUVCQlFBRGdnRVBBRENDQVFvQ2dnRUJBTm1XClJhWWN1Z0F1VzlKV2NHVGpkOW12RkZnU1NWOGo3N2NoSzY0eEIyWEN0MzVwMkpSOEUzWkVBekdvZjNaUDRmcFYKVUVadWRCU3p6c25ZMGdUQ1QwL2F6SzI1ME1nLzZ4TmFmYXdJbytXSUNDS2NYYkRlbGRxS3FQbEorenNCekd4eQpnd2kwOTlzQXhPK0dzOVVTQndHNm1hcThZMjVvNlE5SER3RjlTdTRSdDlDaHpMNWRSL0ZHNXduY1VNWGxUK2kzCnVsbldOVWVxV3VGKzh4WGJsQ291b2NtUFZnc292cnZka1prVE5GbVFoVjhnYW9TR3I1eVNLNEpVelVFMXNrck8KbTBLbXIxZTQvQ05obEI2dmZjNEJ0S2JscFp6UzEzU1FBMktwK2I2ek9zVEhkc0ZWS1hQMlI4eFpXTHh1Q2ZMRApEQXorRmhpa0VqMWJsNU1YYmUwQ0F3RUFBYU1qTUNFd0RnWURWUjBQQVFIL0JBUURBZ0trTUE4R0ExVWRFd0VCCi93UUZNQU1CQWY4d0RRWUpLb1pJaHZjTkFRRUxCUUFEZ2dFQkFBTjF5cGJIYmRjVjZQWjBjL3ViOUg1a0JxK3AKSkJ6Wk40R2lmMHV5U2daM3RXUENiMVlqSjA5K3B1LzQ4ZE1zWHppdnFkNTVqaHFyM04yZ0RvOWE4Z2N2WXowMwp0RG1oR1kzTXgvbHBMcGhjMUZqMmdyanQ3WldHNzYzd2hsNkhkcFRuRVh1enhRT20ya1c3K210ZEFsRUZxaUtpCnRzdll4cDlmTHRyUkhwRGhZT1g4NUFzbFA4VGMvNXY4c0RFd2UzbGNBbndmQXBScGZCdWJ2cjVYN0tWTk9ZMnAKOGFnMTErL0hrUHIvSHJKTWlhcmVYeUdhL2Jac1lMTW8zUGdsZnJJOG1rNE5pZkc3bm9UbzN5b1k5RTBHWWI5UQpwTTl2bU8zbHkwMkxBV1pBSkJGMXVwVVJrWnhZZ3VEc1MwNms4c0tGRWptSWZXZjVxRnU4ZzRLS084Yz0KLS0tLS1FTkQgQ0VSVElGSUNBVEUtLS0tLQo=
        server: https://192.168.0.1:6443
      name: ""
    contexts: []
    current-context: ""
    kind: Config
    preferences: {}
    users: []
This step mainly allows anonymous users to read the CA information created above, which is what gets used when joining the cluster.
# I0403 11:30:26.116136 25657 loader.go:359] Config loaded from file /etc/kubernetes/admin.conf
# I0403 11:30:26.116148 25657 clusterinfo.go:54] [bootstrap-token] copying the cluster from admin.conf to the bootstrap kubeconfig
# I0403 11:30:26.116599 25657 clusterinfo.go:66][bootstrap-token] creating/updating ConfigMap in kube-public namespace
# I0403 11:30:26.118774 25657 round_trippers.go:438] POST https://192.168.1.11:6443/api/v1/namespaces/kube-public/configmaps 201 Created in 2 milliseconds
# I0403 11:30:26.118929 25657 clusterinfo.go:80] creating the RBAC rules for exposing the cluster-info ConfigMap in the kube-public namespace
# I0403 11:30:26.124005 25657 round_trippers.go:438] POST https://192.168.1.11:6443/apis/rbac.authorization.k8s.io/v1/namespaces/kube-public/roles 201 Created in 4 milliseconds
---
kind: Role
apiVersion: rbac.authorization.k8s.io/v1
metadata:
  name: kubeadm:bootstrap-signer-clusterinfo
  namespace: kube-public
  selfLink: "/apis/rbac.authorization.k8s.io/v1/namespaces/kube-public/roles/kubeadm%3Abootstrap-signer-clusterinfo"
  uid: 8770d896-59f2-11e9-9389-00163e132347
  resourceVersion: '199'
  creationTimestamp: '2019-04-08T11:36:20Z'
rules:
- verbs:
  - get
  apiGroups:
  - ''
  resources:
  - configmaps
  resourceNames:
  - cluster-info
#I0403 11:30:26.127798 25657 round_trippers.go:438] POST https://192.168.1.11:6443/apis/rbac.authorization.k8s.io/v1/namespaces/kube-public/rolebindings 201 Created in 3 milliseconds
---
kind: RoleBinding
apiVersion: rbac.authorization.k8s.io/v1
metadata:
  name: kubeadm:bootstrap-signer-clusterinfo
  namespace: kube-public
  selfLink: "/apis/rbac.authorization.k8s.io/v1/namespaces/kube-public/rolebindings/kubeadm%3Abootstrap-signer-clusterinfo"
  uid: 87713973-59f2-11e9-9389-00163e132347
  resourceVersion: '200'
  creationTimestamp: '2019-04-08T11:36:20Z'
subjects:
- kind: User
  apiGroup: rbac.authorization.k8s.io
  name: system:anonymous
roleRef:
  apiGroup: rbac.authorization.k8s.io
  kind: Role
  name: kubeadm:bootstrap-signer-clusterinfo
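That anonymous grant is easy to verify: the ConfigMap can be fetched without any credentials (-k skips TLS verification, which is acceptable here since the response itself carries the pinned CA; IP taken from the logs above):
curl -k https://192.168.1.11:6443/api/v1/namespaces/kube-public/configmaps/cluster-info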
Initialize kube-proxy and CoreDNS
Not much to add here: it is more or less the usual five pieces (ServiceAccount, Service, Deployment, Role, RoleBinding), after which kubeadm prints the final output.
Your Kubernetes control-plane has initialized successfully!
To start using your cluster, you need to run the following as a regular user:
mkdir -p $HOME/.kube
sudo cp -i /etc/kubernetes/admin.conf $HOME/.kube/config
sudo chown $(id -u):$(id -g) $HOME/.kube/config
You should now deploy a pod network to the cluster.
Run "kubectl apply -f [podnetwork].yaml" with one of the options listed at:
https://kubernetes.io/docs/concepts/cluster-administration/addons/
Then you can join any number of worker nodes by running the following on each as root:
kubeadm join 192.168.1.11:6443 --token cr3n6a.5ndgdhxeyinfq7i0 \
--discovery-token-ca-cert-hash sha256:3484008720a27ba243c825114b9a2373fb1d307d42818d23989b84ca698e7514
Node
First comes another large batch of preflight checks; once they pass, the node reads the certificate information from cluster-info:
I0402 23:30:52.707010 2454 join.go:334][preflight] Fetching init configuration
I0402 23:30:52.707019 2454 join.go:603] [join] Discovering cluster-info
[discovery] Trying to connect to API Server "192.168.1.11:6443"
[discovery] Created cluster-info discovery client, requesting info from "https://192.168.1.11:6443"
I0402 23:30:52.761085 2454 round_trippers.go:438] GET https://192.168.1.11:6443/api/v1/namespaces/kube-public/configmaps/cluster-info 200 OK in 53 milliseconds
[discovery] Requesting info from "https://192.168.1.11:6443" again to validate TLS against the pinned public key
I0402 23:30:52.853292 2454 round_trippers.go:438] GET https://192.168.1.11:6443/api/v1/namespaces/kube-public/configmaps/cluster-info 200 OK in 60 milliseconds
[discovery] Cluster info signature and contents are valid and TLS certificate validates against pinned roots, will use API Server "192.168.1.11:6443"
[discovery] Successfully established connection with API Server "192.168.1.11:6443"
I0402 23:30:52.854638 2454 join.go:610][join] Retrieving KubeConfig objects
It receives the relevant configuration and writes it locally, creates the bootstrap kubelet config (bootstrap-kubelet.conf) from the token, and restarts the kubelet so it picks the configuration up.
[join] Reading configuration from the cluster...
[join] FYI: You can look at this config file with 'kubectl -n kube-system get cm kubeadm-config -oyaml'
I0402 23:30:52.868021 2454 round_trippers.go:438] GET https://192.168.1.11:6443/api/v1/namespaces/kube-system/configmaps/kubeadm-config 200 OK in 12 milliseconds
I0402 23:30:52.881050 2454 round_trippers.go:438] GET https://192.168.1.11:6443/api/v1/namespaces/kube-system/configmaps/kubelet-config-1.13 200 OK in 12 milliseconds
I0402 23:30:52.895260 2454 round_trippers.go:438] GET https://192.168.1.11:6443/api/v1/namespaces/kube-system/configmaps/kube-proxy 200 OK in 12 milliseconds
I0402 23:30:52.897995 2454 join.go:341][preflight] Running configuration dependant checks
I0402 23:30:52.898098 2454 join.go:478] [join] writing bootstrap kubelet config file at /etc/kubernetes/bootstrap-kubelet.conf
I0402 23:30:52.979339 2454 loader.go:359] Config loaded from file /etc/kubernetes/bootstrap-kubelet.conf
I0402 23:30:52.979784 2454 join.go:503] Stopping the kubelet
[kubelet] Downloading configuration for the kubelet from the "kubelet-config-1.13" ConfigMap in the kube-system namespace
I0402 23:30:53.003579 2454 round_trippers.go:438] GET https://192.168.1.11:6443/api/v1/namespaces/kube-system/configmaps/kubelet-config-1.13 200 OK in 12 milliseconds
[kubelet-start] Writing kubelet configuration to file "/var/lib/kubelet/config.yaml"
[kubelet-start] Writing kubelet environment file with flags to file "/var/lib/kubelet/kubeadm-flags.env"
I0402 23:30:53.064636 2454 join.go:520] Starting the kubelet
[kubelet-start] Activating the kubelet service
It waits for the API server to approve the CSR, then registers itself as a node.
[tlsbootstrap] Waiting for the kubelet to perform the TLS Bootstrap...
I0402 23:30:54.229055 2454 loader.go:359] Config loaded from file /etc/kubernetes/kubelet.conf
I0402 23:30:54.266377 2454 loader.go:359] Config loaded from file /etc/kubernetes/kubelet.conf
I0402 23:30:54.267652 2454 join.go:538][join] preserving the crisocket information for the node
[patchnode] Uploading the CRI Socket information "/var/run/dockershim.sock" to the Node API object "thanos" as an annotation
I0402 23:30:54.829059 2454 round_trippers.go:438] GET https://192.168.1.11:6443/api/v1/nodes/thanos 200 OK in 60 milliseconds
I0402 23:30:54.845547 2454 round_trippers.go:438] PATCH https://192.168.1.11:6443/api/v1/nodes/thanos 200 OK in 13 milliseconds
And with that, registration is complete.
This node has joined the cluster:
- Certificate signing request was sent to apiserver and a response was received.
- The Kubelet was informed of the new secure connection details.
Run 'kubectl get nodes' on the master to see this node join the cluster.
Appendix
Protocol | Direction | Port | Description |
---|---|---|---|
TCP | Inbound | 16443* | Load balancer Kubernetes API server port |
TCP | Inbound | 6443* | Kubernetes API server |
TCP | Inbound | 4001 | etcd listen client port |
TCP | Inbound | 2379-2380 | etcd server client API |
TCP | Inbound | 10250 | Kubelet API |
TCP | Inbound | 10251 | kube-scheduler |
TCP | Inbound | 10252 | kube-controller-manager |
TCP | Inbound | 10255 | Read-only Kubelet API (Deprecated) |
TCP | Inbound | 30000-32767 | NodePort Services |
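If you keep firewalld running on the master instead of disabling it, the ports above can be opened like this (a sketch for CentOS; most quick-start guides, this one included, simply turn the firewall off):
firewall-cmd --permanent --add-port=6443/tcp --add-port=2379-2380/tcp --add-port=10250-10252/tcp
firewall-cmd --reload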
Get the sha256 hash of the CA certificate (the value used for --discovery-token-ca-cert-hash):
openssl x509 -pubkey -in /etc/kubernetes/pki/ca.crt | openssl rsa -pubin -outform der 2>/dev/null | openssl dgst -sha256 -hex | sed 's/^.* //'
root@master:/etc/kubernetes/pki# cat sa.pub
-----BEGIN PUBLIC KEY-----
MIIBIjANBgkqhkiG9w0BAQEFAAOCAQ8AMIIBCgKCAQEAystBIninWaQADb12t7p9
+9PoSGwzhvfhX1Pd9STxI5TKk8bSw5+I5apZJEi2FdZer5O9EtrxgDoUp9IBijyy
EgBbE+JwXd52IgKcSVkWaL8uACkqaxGAtUkxdofeVvp0PoK0sF+dmPDNLSGxH+MR
fF5qCzS4L0UQhAa35M/CR75mDFnKLUIphBGE5F8d8RxrOZAOPrqqqgvci5/TJq5K
jAwak2D1mzU7SJYedn0bvzwXPdXaiQjKHg1Fyi8Pw8utS6VEG2EZK1BKEXpkrz8n
4TZqZ5OajDZg/SjU79QV9W4OsF+WoR3eOPyr3DeSGkPXtG00ifbSgpx7pjV7CVoM
VQIDAQAB
-----END PUBLIC KEY-----
root@master:/etc/kubernetes/pki# openssl rsa -in ca.key -pubout
writing RSA key
-----BEGIN PUBLIC KEY-----
MIIBIjANBgkqhkiG9w0BAQEFAAOCAQ8AMIIBCgKCAQEA0mst0xEryhOLsiIB6MTy
yNjXKaFVFm8NafZUI+ZiRC7qFxsBsdT10OIKUIn5tFRUc7uxzcZzlRwJtwC7wrBe
4Jy1uN7VPrg5hgk7aqqdwtUFDhUVGtQOalFvqkVdVrolYCb+kDghjFR4chBNRng1
ldPuaGUCvyItltk8d5j68TDzf0V2LWDWdJnJ/k3GTK8PUlkYhrid8i5TyAmIO/A9
+rzQr8YCV1tnomodX88+jjjCZxXZ1np/dYtgo5XL7+zkWHucINbiVtG8sVI+sSkq
5fp4f/C7zGv78ieeRCWUjskaH1QoBVWCNug/r1qFe/G8Je1JGE+bMC4+YcYksXuq
WQIDAQAB
-----END PUBLIC KEY-----
Ref
Installing Kubernetes 1.7/1.8/1.9 with kubeadm
https://blog.csdn.net/zhuchuangang/article/details/76572157
Aliyun Repo
https://www.jianshu.com/p/4b5f960a5bea
Building a Kubernetes (1.10.2) cluster with kubeadm (mainland-China environment)
https://www.cnblogs.com/RainingNight/p/using-kubeadm-to-create-a-cluster.html